Compare commits

..

1 Commits

Author SHA1 Message Date
b454fca320 feat(panoramic): 360° equirectangular stitching + RTSP stream (Issue #105)
Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-03-02 08:39:05 -05:00
53 changed files with 4 additions and 8217 deletions

View File

@ -1,35 +0,0 @@
#ifndef BATTERY_H
#define BATTERY_H
/*
* battery.h — Vbat ADC reading for CRSF telemetry (Issue #103)
*
* Hardware: ADC3 channel IN11 on PC1 (ADC_BATT 1, Mamba F722).
* Voltage divider: 10 kΩ / 1 kΩ → 11:1 ratio.
* Resolution: 12-bit (0–4095), Vref = 3.3 V.
*
* Filtered output in millivolts. Reading is averaged over
* BATTERY_SAMPLES conversions (software oversampling) to reduce noise.
*/
#include <stdint.h>
/* Initialise ADC3 for single-channel Vbat reading on PC1. */
void battery_init(void);
/*
* battery_read_mv() — blocking single-shot read; returns Vbat in mV.
* Takes ~1 µs (12-bit conversion at 36 MHz APB2 / 8 prescaler = 4.5 MHz ADC clk).
* NOTE(review): ~1 µs looks optimistic for a 12-bit conversion at 4.5 MHz
* ADC clock (a conversion is typically ≥ 12 ADC cycles ≈ 3 µs) — confirm
* against the MCU reference manual.
* Returns 0 if ADC not initialised or conversion times out.
*/
uint32_t battery_read_mv(void);
/*
* battery_estimate_pct() — coarse state-of-charge estimate from Vbat (mV).
* Works for 3S LiPo (10.5–12.6 V) and 4S (14.0–16.8 V).
* Cell-count detection is automatic based on voltage.
* Returns 0–100, or 255 if voltage is out of range.
*/
uint8_t battery_estimate_pct(uint32_t voltage_mv);
#endif /* BATTERY_H */

View File

@ -127,20 +127,10 @@
// Debug: UART5 (PC12=TX, PD2=RX)
// --- CRSF / ExpressLRS ---
// CH1[0]=steer CH2[1]=throttle CH5[4]=arm CH6[5]=mode
// CH1[0]=steer CH2[1]=lean(future) CH5[4]=arm CH6[5]=mode
#define CRSF_ARM_THRESHOLD 1750 /* CH5 raw value; > threshold = armed */
#define CRSF_STEER_MAX 400 /* CH1 range: -400..+400 motor counts */
#define CRSF_FAILSAFE_MS 500 /* Disarm after this ms without a frame (Issue #103) */
// --- Battery ADC (ADC3, PC1 = ADC123_IN11) ---
/* Mamba F722: 10kΩ + 1kΩ voltage divider → 11:1 ratio */
#define VBAT_SCALE_NUM 11 /* Numerator of divider ratio */
#define VBAT_AREF_MV 3300 /* ADC reference in mV */
#define VBAT_ADC_BITS 12 /* 12-bit ADC → 4096 counts */
/* Filtered Vbat in mV: (raw * 3300 * 11) / 4096, updated at 10Hz */
// --- CRSF Telemetry TX (uplink: FC → ELRS module → pilot handset) ---
#define CRSF_TELEMETRY_HZ 1 /* Telemetry TX rate (Hz) */
#define CRSF_FAILSAFE_MS 300 /* Disarm after this ms without a frame */
// --- PID Tuning ---
#define PID_KP 35.0f
@ -165,8 +155,8 @@
// --- RC / Mode Manager ---
/* CRSF channel indices (0-based; CRSF range 172-1811, center 992) */
#define CRSF_CH_STEER 0 /* CH1 — right stick horizontal (steer) */
#define CRSF_CH_SPEED 1 /* CH2 — right stick vertical (throttle) */
#define CRSF_CH_SPEED 2 /* CH3 — left stick vertical (fwd/back) */
#define CRSF_CH_STEER 3 /* CH4 — left stick horizontal (yaw) */
#define CRSF_CH_ARM 4 /* CH5 — arm switch (2-pos) */
#define CRSF_CH_MODE 5 /* CH6 — mode switch (3-pos) */
/* Deadband around CRSF center (992) in raw counts (~2% of range) */

View File

@ -40,30 +40,6 @@ void crsf_parse_byte(uint8_t byte);
*/
int16_t crsf_to_range(uint16_t val, int16_t min, int16_t max);
/*
* crsf_send_battery() transmit CRSF battery-sensor telemetry frame (type 0x08)
* back to the ELRS TX module over UART4 TX. Call at CRSF_TELEMETRY_HZ (1 Hz).
*
* voltage_mv : battery voltage in millivolts (e.g. 12600 for 3S full)
* current_ma : current draw in milliamps (0 if no sensor)
* remaining_pct: state-of-charge 0–100 % (255 = unknown)
*
* Frame: [0xC8][12][0x08][v16_hi][v16_lo][c16_hi][c16_lo][cap24×3][rem][CRC]
* voltage unit: 100 mV (12600 mV → 126)
* current unit: 100 mA
*/
void crsf_send_battery(uint32_t voltage_mv, uint32_t current_ma,
uint8_t remaining_pct);
/*
* crsf_send_flight_mode() transmit CRSF flight-mode frame (type 0x21)
* for display on the pilot's handset OSD.
*
* armed: true  → "ARMED\0"
*        false → "DISARM\0"
*/
void crsf_send_flight_mode(bool armed);
extern volatile CRSFState crsf_state;
#endif /* CRSF_H */

View File

@ -1,195 +0,0 @@
# IMX219 Camera Calibration Workflow
Full step-by-step calibration procedure for the 4× IMX219 160° fisheye CSI cameras
on saltybot (cam0=front, cam1=right, cam2=rear, cam3=left).
## Hardware
| Camera | Position | Yaw | Topic |
|--------|----------|-------|------------------------------|
| cam0 | Front | 0° | /camera/front/image_raw |
| cam1 | Right | -90° | /camera/right/image_raw |
| cam2 | Rear | 180° | /camera/rear/image_raw |
| cam3 | Left | 90° | /camera/left/image_raw |
Mount geometry: radius=5cm from robot centre, height=30cm, 90° angular spacing.
## Prerequisites
- OpenCV with fisheye support: `pip3 install opencv-python numpy pyyaml`
- For ROS2 mode: ROS2 Humble, cv_bridge, image_transport
- Printed checkerboard: 8×6 inner corners, 25mm squares (A3 paper recommended)
- Generator (note: this command produces a 9×7 ChArUco board, not the plain 8×6 checkerboard used by the capture/calibration scripts — use the calib.io link below for a matching checkerboard): `python3 -c "import cv2; board=cv2.aruco.CharucoBoard((9,7),0.025,0.0175,cv2.aruco.getPredefinedDictionary(cv2.aruco.DICT_6X6_250)); cv2.imwrite('board.png', board.generateImage((700,500)))"`
- Or download from: https://calib.io/pages/camera-calibration-pattern-generator
## Step 1 — Print and Prepare the Checkerboard
- Print the 8×6 inner-corner checkerboard at exactly 25mm per square
- Mount flat on a rigid board (cardboard or foamcore) — no warping allowed
- Measure actual square size with calipers, update `--square-size` if different
## Step 2 — Start Cameras
```bash
# On the Jetson, in the saltybot docker container:
ros2 launch saltybot_cameras cameras.launch.py
# Verify all 4 topics are publishing:
ros2 topic list | grep camera
ros2 topic hz /camera/front/image_raw
```
## Step 3 — Capture Calibration Images (30 per camera)
Run for each camera in turn. Move the board to many different positions and
angles within the field of view — especially corners and edges.
```bash
# Option A: ROS2 mode (recommended on Jetson, requires running cameras)
ros2 run saltybot_calibration capture_calibration_images.py \
--ros --camera front --n-images 30 \
--output-dir /mnt/nvme/saltybot/calibration/raw
ros2 run saltybot_calibration capture_calibration_images.py \
--ros --camera right --n-images 30 \
--output-dir /mnt/nvme/saltybot/calibration/raw
ros2 run saltybot_calibration capture_calibration_images.py \
--ros --camera rear --n-images 30 \
--output-dir /mnt/nvme/saltybot/calibration/raw
ros2 run saltybot_calibration capture_calibration_images.py \
--ros --camera left --n-images 30 \
--output-dir /mnt/nvme/saltybot/calibration/raw
# Option B: All cameras sequentially (prompts between cameras)
ros2 run saltybot_calibration capture_calibration_images.py \
--ros --camera all --n-images 30 \
--output-dir /mnt/nvme/saltybot/calibration/raw
# Option C: Standalone (no ROS2, uses /dev/videoX directly)
python3 capture_calibration_images.py \
--camera front --n-images 30 \
--device /dev/video0 \
--output-dir /mnt/nvme/saltybot/calibration/raw
```
### Capture Tips for Fisheye Lenses
- Cover the entire FoV — include corners and edges (where distortion is strongest)
- Use 5+ different orientations of the board (tilt, rotate, skew)
- Keep the board fully visible (no partial views)
- Good lighting, no motion blur
- Aim for 20-30 images with varied positions
## Step 4 — Compute Intrinsics per Camera
```bash
for cam in front right rear left; do
ros2 run saltybot_calibration calibrate_camera.py \
--images-dir /mnt/nvme/saltybot/calibration/raw/cam_${cam} \
--output /mnt/nvme/saltybot/calibration/${cam}/camera_info.yaml \
--camera-name camera_${cam} \
--board-size 8x6 \
--square-size 0.025 \
--image-size 640x480 \
--show-reprojection
done
```
## Step 5 — Verify Reprojection Error
Target: RMS reprojection error < 1.5 px per camera.
If RMS > 1.5 px:
1. Remove outlier images (highest per-image error from table)
2. Recapture with more varied board positions
3. Check board is truly flat and square size is correct
4. Try `--image-size 1280x720` if capturing at higher resolution
```bash
# Quick check — view undistorted preview
ros2 run saltybot_calibration preview_undistorted.py \
--image /mnt/nvme/saltybot/calibration/raw/cam_front/frame_0000.png \
--calibration /mnt/nvme/saltybot/calibration/front/camera_info.yaml
```
## Step 6 — Compute Extrinsics (Mechanical Mode)
Uses known mount geometry (radius=5cm, height=30cm, 90° spacing) to compute
analytically exact camera-to-base_link transforms.
```bash
ros2 run saltybot_calibration calibrate_extrinsics.py \
--mode mechanical \
--mount-radius 0.05 \
--mount-height 0.30 \
--output /mnt/nvme/saltybot/calibration/extrinsics.yaml \
--generate-launch \
~/ros2_ws/src/saltybot_bringup/launch/static_transforms.launch.py
```
If mechanical dimensions differ from defaults, update `--mount-radius` and
`--mount-height` from your actual measurements.
## Step 7 — Generate Static Transform Launch File
(Already done by --generate-launch in Step 6, but can regenerate independently:)
```bash
ros2 run saltybot_calibration generate_static_transforms.py \
--extrinsics /mnt/nvme/saltybot/calibration/extrinsics.yaml \
--output ~/ros2_ws/src/saltybot_bringup/launch/static_transforms.launch.py
```
## Step 8 — Update surround_vision_params.yaml
Edit `jetson/config/surround_vision_params.yaml` (or the equivalent in your
saltybot_surround package) to point camera_info_url at the new calibration files:
```yaml
camera_info_urls:
front: 'file:///mnt/nvme/saltybot/calibration/front/camera_info.yaml'
right: 'file:///mnt/nvme/saltybot/calibration/right/camera_info.yaml'
rear: 'file:///mnt/nvme/saltybot/calibration/rear/camera_info.yaml'
left: 'file:///mnt/nvme/saltybot/calibration/left/camera_info.yaml'
```
## Step 9 — Restart Nodes
```bash
# Restart camera and surround nodes to pick up new calibration
ros2 lifecycle set /saltybot_cameras shutdown
ros2 launch saltybot_bringup cameras.launch.py
ros2 launch saltybot_surround surround.launch.py
```
## Output Files
After full calibration, you should have:
```
/mnt/nvme/saltybot/calibration/
front/camera_info.yaml # intrinsics: K, D (equidistant)
right/camera_info.yaml
rear/camera_info.yaml
left/camera_info.yaml
extrinsics.yaml # base_link -> camera_*_optical_frame
```
Template files are in `jetson/calibration/` (tracked in git) with placeholder
IMX219 values (fx=229px @ 640×480). Replace with calibrated values.
## Distortion Model Reference
The IMX219 160° fisheye uses the **equidistant** (Kannala-Brandt) model:
```
r(theta) = k1*theta + k2*theta^3 + k3*theta^5 + k4*theta^7
```
where theta is the angle of incidence. OpenCV's `cv2.fisheye` module implements
this model with 4 coefficients [k1, k2, k3, k4].
This is different from the `plumb_bob` (radial/tangential) model used for
standard lenses — do not mix them up.

View File

@ -1,27 +0,0 @@
# TEMPLATE — replace with output of calibrate_camera.py
# IMX219 160 deg at 640x480: estimated placeholder values
# Run: ros2 run saltybot_calibration calibrate_camera.py \
# --images-dir /mnt/nvme/saltybot/calibration/raw/cam_front \
# --output /mnt/nvme/saltybot/calibration/front/camera_info.yaml \
# --camera-name camera_front --image-size 640x480
image_width: 640
image_height: 480
camera_name: camera_front
camera_matrix:
rows: 3
cols: 3
data: [229.0, 0.0, 320.0, 0.0, 229.0, 240.0, 0.0, 0.0, 1.0]
distortion_model: equidistant
distortion_coefficients:
rows: 1
cols: 4
data: [0.0, 0.0, 0.0, 0.0]
rectification_matrix:
rows: 3
cols: 3
data: [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]
projection_matrix:
rows: 3
cols: 4
data: [229.0, 0.0, 320.0, 0.0, 0.0, 229.0, 240.0, 0.0, 0.0, 0.0, 1.0, 0.0]
_calibration_rms_px: null

View File

@ -1,27 +0,0 @@
# TEMPLATE — replace with output of calibrate_camera.py
# IMX219 160 deg at 640x480: estimated placeholder values
# Run: ros2 run saltybot_calibration calibrate_camera.py \
# --images-dir /mnt/nvme/saltybot/calibration/raw/cam_left \
# --output /mnt/nvme/saltybot/calibration/left/camera_info.yaml \
# --camera-name camera_left --image-size 640x480
image_width: 640
image_height: 480
camera_name: camera_left
camera_matrix:
rows: 3
cols: 3
data: [229.0, 0.0, 320.0, 0.0, 229.0, 240.0, 0.0, 0.0, 1.0]
distortion_model: equidistant
distortion_coefficients:
rows: 1
cols: 4
data: [0.0, 0.0, 0.0, 0.0]
rectification_matrix:
rows: 3
cols: 3
data: [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]
projection_matrix:
rows: 3
cols: 4
data: [229.0, 0.0, 320.0, 0.0, 0.0, 229.0, 240.0, 0.0, 0.0, 0.0, 1.0, 0.0]
_calibration_rms_px: null

View File

@ -1,27 +0,0 @@
# TEMPLATE — replace with output of calibrate_camera.py
# IMX219 160 deg at 640x480: estimated placeholder values
# Run: ros2 run saltybot_calibration calibrate_camera.py \
# --images-dir /mnt/nvme/saltybot/calibration/raw/cam_rear \
# --output /mnt/nvme/saltybot/calibration/rear/camera_info.yaml \
# --camera-name camera_rear --image-size 640x480
image_width: 640
image_height: 480
camera_name: camera_rear
camera_matrix:
rows: 3
cols: 3
data: [229.0, 0.0, 320.0, 0.0, 229.0, 240.0, 0.0, 0.0, 1.0]
distortion_model: equidistant
distortion_coefficients:
rows: 1
cols: 4
data: [0.0, 0.0, 0.0, 0.0]
rectification_matrix:
rows: 3
cols: 3
data: [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]
projection_matrix:
rows: 3
cols: 4
data: [229.0, 0.0, 320.0, 0.0, 0.0, 229.0, 240.0, 0.0, 0.0, 0.0, 1.0, 0.0]
_calibration_rms_px: null

View File

@ -1,27 +0,0 @@
# TEMPLATE — replace with output of calibrate_camera.py
# IMX219 160 deg at 640x480: estimated placeholder values
# Run: ros2 run saltybot_calibration calibrate_camera.py \
# --images-dir /mnt/nvme/saltybot/calibration/raw/cam_right \
# --output /mnt/nvme/saltybot/calibration/right/camera_info.yaml \
# --camera-name camera_right --image-size 640x480
image_width: 640
image_height: 480
camera_name: camera_right
camera_matrix:
rows: 3
cols: 3
data: [229.0, 0.0, 320.0, 0.0, 229.0, 240.0, 0.0, 0.0, 1.0]
distortion_model: equidistant
distortion_coefficients:
rows: 1
cols: 4
data: [0.0, 0.0, 0.0, 0.0]
rectification_matrix:
rows: 3
cols: 3
data: [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]
projection_matrix:
rows: 3
cols: 4
data: [229.0, 0.0, 320.0, 0.0, 0.0, 229.0, 240.0, 0.0, 0.0, 0.0, 1.0, 0.0]
_calibration_rms_px: null

View File

@ -1,43 +0,0 @@
# Camera extrinsics — base_link to camera_optical_frame transforms
# Generated from mechanical mount geometry:
# mount_radius = 0.05m, mount_height = 0.30m
# Camera yaw: front=0deg, right=-90deg, rear=180deg, left=90deg
#
# Quaternion convention: [qx, qy, qz, qw] (xyzw)
# Rotation: camera_optical_frame -> base_link
# camera_optical: z=forward(out), x=right, y=down
# base_link: x=forward, y=left, z=up (ROS REP-103)
#
# Regenerate with:
# python3 calibrate_extrinsics.py --mode mechanical \
# --mount-radius 0.05 --mount-height 0.30 \
# --output jetson/calibration/extrinsics.yaml
_description: Camera extrinsics - base_link to camera_optical_frame transforms
_mount_radius_m: 0.05
_mount_height_m: 0.30
cameras:
front:
parent_frame: base_link
child_frame: camera_front_optical_frame
translation: [0.05, 0.0, 0.30]
rotation_quat_xyzw: [0.5, -0.5, 0.5, -0.5]
right:
parent_frame: base_link
child_frame: camera_right_optical_frame
translation: [0.0, 0.05, 0.30]
rotation_quat_xyzw: [-0.707107, 0.0, 0.0, 0.707107]
rear:
parent_frame: base_link
child_frame: camera_rear_optical_frame
translation: [-0.05, 0.0, 0.30]
rotation_quat_xyzw: [-0.5, -0.5, 0.5, 0.5]
left:
parent_frame: base_link
child_frame: camera_left_optical_frame
translation: [0.0, -0.05, 0.30]
rotation_quat_xyzw: [0.0, -0.707107, 0.707107, 0.0]

View File

@ -1,29 +0,0 @@
cmake_minimum_required(VERSION 3.8)
project(saltybot_calibration)
# Build dependencies: ament CMake core + Python glue, and rclpy at runtime.
find_package(ament_cmake REQUIRED)
find_package(ament_cmake_python REQUIRED)
find_package(rclpy REQUIRED)
# Install the importable saltybot_calibration Python package.
ament_python_install_package(${PROJECT_NAME})
# Executable entry points, installed to lib/<pkg> so `ros2 run` can find them.
install(PROGRAMS
scripts/capture_calibration_images.py
scripts/calibrate_camera.py
scripts/calibrate_extrinsics.py
scripts/preview_undistorted.py
scripts/generate_static_transforms.py
DESTINATION lib/${PROJECT_NAME}
)
# Launch files and default parameter files.
install(DIRECTORY
launch config
DESTINATION share/${PROJECT_NAME}
)
if(BUILD_TESTING)
find_package(ament_lint_auto REQUIRED)
ament_lint_auto_find_test_dependencies()
endif()
ament_package()

View File

@ -1,27 +0,0 @@
calibration:
board_size: '8x6' # inner corners (cols x rows)
square_size_m: 0.025 # 25mm squares
n_images: 30 # images to capture per camera
image_size: '640x480' # capture resolution
cameras:
front:
topic: '/camera/front/image_raw'
output_yaml: '/mnt/nvme/saltybot/calibration/front/camera_info.yaml'
right:
topic: '/camera/right/image_raw'
output_yaml: '/mnt/nvme/saltybot/calibration/right/camera_info.yaml'
rear:
topic: '/camera/rear/image_raw'
output_yaml: '/mnt/nvme/saltybot/calibration/rear/camera_info.yaml'
left:
topic: '/camera/left/image_raw'
output_yaml: '/mnt/nvme/saltybot/calibration/left/camera_info.yaml'
extrinsics:
mode: mechanical
mount_radius_m: 0.05
mount_height_m: 0.30
output_yaml: '/mnt/nvme/saltybot/calibration/extrinsics.yaml'
reprojection_warn_threshold_px: 1.5

View File

@ -1,108 +0,0 @@
"""
calibration_capture.launch.py Calibration workflow launcher for saltybot IMX219 cameras.
Full calibration workflow:
Step 1: Start cameras (requires saltybot_cameras package to be running)
Step 2: Run capture_calibration_images.py for each camera (30 images each)
Step 3: Run calibrate_camera.py for each camera
Step 4: Run calibrate_extrinsics.py
Step 5: Run generate_static_transforms.py
This launch file starts the capture script for one camera via ExecuteProcess.
For full workflow, run each step manually (see jetson/calibration/README.md).
Usage:
ros2 launch saltybot_calibration calibration_capture.launch.py camera:=front
ros2 launch saltybot_calibration calibration_capture.launch.py camera:=all
Arguments:
camera Camera to capture (front/right/rear/left/all, default: front)
n_images Images to capture per camera (default: 30)
output_dir Output directory (default: /mnt/nvme/saltybot/calibration/raw)
board_size Checkerboard inner corners WxH (default: 8x6)
square_size Physical square size in metres (default: 0.025)
"""
from launch import LaunchDescription
from launch.actions import DeclareLaunchArgument, ExecuteProcess, LogInfo
from launch.substitutions import LaunchConfiguration, PathJoinSubstitution
from launch_ros.substitutions import FindPackageShare
def generate_launch_description():
    """Build the calibration-capture launch description.

    Declares the five user-facing launch arguments, logs a banner that
    documents the remaining manual calibration steps, and starts
    capture_calibration_images.py in --ros mode via ExecuteProcess.
    """
    # (name, default, description) for every user-facing launch argument.
    arg_specs = [
        ('camera', 'front',
         'Camera to capture from: front/right/rear/left/all'),
        ('n_images', '30',
         'Number of calibration images to capture per camera'),
        ('output_dir', '/mnt/nvme/saltybot/calibration/raw',
         'Root output directory for captured images'),
        ('board_size', '8x6',
         'Checkerboard inner corners as WxH'),
        ('square_size', '0.025',
         'Physical checkerboard square size in metres'),
    ]
    declared_args = [
        DeclareLaunchArgument(name, default_value=default, description=desc)
        for name, default, desc in arg_specs
    ]
    # The installed script lives in <prefix>/lib/saltybot_calibration/,
    # reached relative to this package's share directory.
    script_path = PathJoinSubstitution([
        FindPackageShare('saltybot_calibration'),
        '..', '..', '..', 'lib', 'saltybot_calibration',
        'capture_calibration_images.py',
    ])
    capture = ExecuteProcess(
        cmd=[
            'python3', script_path,
            '--ros',
            '--camera', LaunchConfiguration('camera'),
            '--n-images', LaunchConfiguration('n_images'),
            '--output-dir', LaunchConfiguration('output_dir'),
            '--board-size', LaunchConfiguration('board_size'),
            '--square-size', LaunchConfiguration('square_size'),
        ],
        output='screen',
    )
    banner = LogInfo(
        msg=[
            '\n',
            '=== IMX219 Calibration Workflow ===\n',
            'Step 1 (this launch): Capture checkerboard images\n',
            ' Controls: SPACE=capture r=reset q=done\n',
            '\n',
            'Step 2: Calibrate intrinsics per camera:\n',
            ' ros2 run saltybot_calibration calibrate_camera.py \\\n',
            ' --images-dir /mnt/nvme/saltybot/calibration/raw/cam_front \\\n',
            ' --output /mnt/nvme/saltybot/calibration/front/camera_info.yaml \\\n',
            ' --camera-name camera_front --image-size 640x480\n',
            '\n',
            'Step 3: Compute extrinsics:\n',
            ' ros2 run saltybot_calibration calibrate_extrinsics.py \\\n',
            ' --mode mechanical \\\n',
            ' --output /mnt/nvme/saltybot/calibration/extrinsics.yaml\n',
            '\n',
            'Step 4: Generate static transforms launch file:\n',
            ' ros2 run saltybot_calibration generate_static_transforms.py \\\n',
            ' --extrinsics /mnt/nvme/saltybot/calibration/extrinsics.yaml \\\n',
            ' --output ~/ros2_ws/src/saltybot_bringup/launch/static_transforms.launch.py\n',
            '\n',
            'See jetson/calibration/README.md for full instructions.\n',
        ]
    )
    # Same ordering as before: all argument declarations, then the
    # workflow banner, then the capture process itself.
    return LaunchDescription(declared_args + [banner, capture])

View File

@ -1,25 +0,0 @@
<?xml version="1.0"?>
<?xml-model href="http://download.ros.org/schema/package_format3.xsd" schematypens="http://www.w3.org/2001/XMLSchema"?>
<package format="3">
<name>saltybot_calibration</name>
<version>0.1.0</version>
<description>IMX219 camera intrinsic + extrinsic calibration workflow for saltybot</description>
<maintainer email="seb@vayrette.com">seb</maintainer>
<license>MIT</license>
<buildtool_depend>ament_cmake_python</buildtool_depend>
<buildtool_depend>ament_cmake</buildtool_depend>
<depend>rclpy</depend>
<depend>sensor_msgs</depend>
<depend>cv_bridge</depend>
<depend>image_transport</depend>
<exec_depend>python3-opencv</exec_depend>
<exec_depend>python3-numpy</exec_depend>
<exec_depend>python3-yaml</exec_depend>
<test_depend>ament_copyright</test_depend>
<test_depend>ament_flake8</test_depend>
<test_depend>ament_pep257</test_depend>
<test_depend>python3-pytest</test_depend>
<export>
<build_type>ament_cmake</build_type>
</export>
</package>

View File

@ -1,175 +0,0 @@
"""camera_calibrator.py — OpenCV fisheye intrinsic calibration for IMX219 cameras.
Uses cv2.fisheye model (Scaramuzza/Kannala-Brandt 4-param distortion):
k1, k2, k3, k4 equidistant distortion coefficients
Outputs camera_info_manager compatible YAML:
image_width, image_height, camera_name
camera_matrix: 3x3 K
distortion_model: equidistant
distortion_coefficients: [k1, k2, k3, k4]
rectification_matrix: 3x3 I
projection_matrix: 3x4 P (same as K padded)
"""
import cv2
import numpy as np
import yaml
import os
from pathlib import Path
def find_checkerboard(img_bgr: np.ndarray, board_size: tuple,
                      flags: int = None) -> tuple:
    """Locate inner checkerboard corners in a BGR image.

    Args:
        img_bgr: input image in BGR channel order (as from cv2.imread).
        board_size: (cols, rows) of inner corners, e.g. (8, 6).
        flags: optional cv2.findChessboardCorners flags; defaults to
            adaptive thresholding + image normalisation.

    Returns:
        (found, corners) — corners is an [N, 1, 2] float32 array refined
        to sub-pixel accuracy when found, otherwise the raw detector output.
    """
    if flags is None:
        flags = cv2.CALIB_CB_ADAPTIVE_THRESH | cv2.CALIB_CB_NORMALIZE_IMAGE
    gray = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2GRAY)
    found, corners = cv2.findChessboardCorners(gray, board_size, flags)
    if not found:
        return found, corners
    # Sub-pixel refinement: 7x7 search window, up to 50 iterations / 1e-3 eps.
    term = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 50, 0.001)
    refined = cv2.cornerSubPix(gray, corners, (7, 7), (-1, -1), term)
    return found, refined
def calibrate_fisheye(
    image_paths: list,
    board_size: tuple,
    square_size_m: float,
    image_size: tuple,  # (width, height)
) -> dict:
    """Run cv2.fisheye calibration over a set of checkerboard images.

    Args:
        image_paths: calibration image file paths.
        board_size: (cols, rows) inner corners.
        square_size_m: physical square size in metres (e.g. 0.025).
        image_size: (width, height) of the images.

    Returns:
        dict with keys: K, D, rvecs, tvecs, rms, n_images_used,
        n_images_skipped, obj_pts, img_pts.

    Raises:
        RuntimeError: when fewer than 5 images yield usable corners.
    """
    # One planar grid of board corners in board coordinates, scaled to metres;
    # reused (by reference) for every accepted view.
    template = np.zeros((board_size[0] * board_size[1], 1, 3), dtype=np.float64)
    template[:, 0, :2] = np.mgrid[0:board_size[0], 0:board_size[1]].T.reshape(-1, 2)
    template *= square_size_m
    world_pts = []
    pixel_pts = []
    n_ok = 0
    n_bad = 0
    for path in image_paths:
        frame = cv2.imread(path)
        if frame is None:
            print(f' [WARN] Cannot read: {path}')
            n_bad += 1
            continue
        ok, corners = find_checkerboard(frame, board_size)
        if not ok:
            print(f' [SKIP] No corners: {os.path.basename(path)}')
            n_bad += 1
            continue
        world_pts.append(template)
        pixel_pts.append(corners)
        n_ok += 1
        print(f' [OK] {os.path.basename(path)}')
    if n_ok < 5:
        raise RuntimeError(
            f'Only {n_ok} valid images (need >=5). Check lighting and board visibility.'
        )
    K = np.zeros((3, 3))
    D = np.zeros((4, 1))
    # CHECK_COND rejects degenerate views; FIX_SKEW pins K[0,1] to zero.
    calib_flags = (
        cv2.fisheye.CALIB_RECOMPUTE_EXTRINSIC |
        cv2.fisheye.CALIB_CHECK_COND |
        cv2.fisheye.CALIB_FIX_SKEW
    )
    rms, K, D, rvecs, tvecs = cv2.fisheye.calibrate(
        world_pts, pixel_pts, image_size, K, D, flags=calib_flags
    )
    print(f' Calibration RMS reprojection error: {rms:.4f} px ({n_ok} images)')
    return {
        'K': K, 'D': D, 'rvecs': rvecs, 'tvecs': tvecs,
        'rms': rms, 'n_images_used': n_ok, 'n_images_skipped': n_bad,
        'obj_pts': world_pts, 'img_pts': pixel_pts,
    }
def write_camera_info_yaml(
    output_path: str,
    camera_name: str,
    image_size: tuple,  # (width, height)
    K: np.ndarray,
    D: np.ndarray,
    rms: float,
) -> None:
    """Write a camera_info_manager-compatible calibration YAML.

    Format matches CameraInfoManager.loadCameraInfo():
    distortion_model = 'equidistant' (ROS2 fisheye standard) with the
    4 fisheye coefficients [k1, k2, k3, k4] in D.
    """
    width, height = image_size
    fx, fy = float(K[0, 0]), float(K[1, 1])
    cx, cy = float(K[0, 2]), float(K[1, 2])
    payload = {
        'image_width': width,
        'image_height': height,
        'camera_name': camera_name,
        'camera_matrix': {
            'rows': 3, 'cols': 3,
            'data': [fx, 0.0, cx, 0.0, fy, cy, 0.0, 0.0, 1.0],
        },
        'distortion_model': 'equidistant',
        'distortion_coefficients': {
            'rows': 1, 'cols': 4,
            'data': [float(D[0, 0]), float(D[1, 0]),
                     float(D[2, 0]), float(D[3, 0])],
        },
        # No rectification for a monocular camera: identity R.
        'rectification_matrix': {
            'rows': 3, 'cols': 3,
            'data': [1.0, 0.0, 0.0,
                     0.0, 1.0, 0.0,
                     0.0, 0.0, 1.0],
        },
        # P is K padded with a zero translation column.
        'projection_matrix': {
            'rows': 3, 'cols': 4,
            'data': [fx, 0.0, cx, 0.0,
                     0.0, fy, cy, 0.0,
                     0.0, 0.0, 1.0, 0.0],
        },
        # Metadata (not part of camera_info_manager spec, but informational)
        '_calibration_rms_px': round(rms, 4),
    }
    Path(output_path).parent.mkdir(parents=True, exist_ok=True)
    with open(output_path, 'w') as f:
        yaml.dump(payload, f, default_flow_style=False, sort_keys=False)
    print(f' Written: {output_path}')
def compute_reprojection_errors(
    obj_pts: list, img_pts: list, rvecs: list, tvecs: list,
    K: np.ndarray, D: np.ndarray,
) -> list:
    """Per-image RMS reprojection error (px) under the fisheye model."""
    out = []
    for idx in range(len(obj_pts)):
        reproj, _ = cv2.fisheye.projectPoints(
            obj_pts[idx], rvecs[idx], tvecs[idx], K, D)
        n_corners = len(img_pts[idx])
        out.append(cv2.norm(img_pts[idx], reproj, cv2.NORM_L2) / np.sqrt(n_corners))
    return out

View File

@ -1,139 +0,0 @@
"""extrinsic_calibrator.py — Extrinsic calibration between 4 IMX219 cameras.
Method: overlapping stereo calibration using shared checkerboard views.
Each adjacent pair (cam0-cam1, cam1-cam2, cam2-cam3, cam3-cam0) is calibrated
via cv2.fisheye.stereoCalibrate to find R, T between them.
Then each camera's pose relative to base_link is computed by chaining transforms,
anchoring on the known mechanical mount geometry (4 cameras at 90° intervals,
radius 0.05m from centre, all at height 0.30m).
Output: extrinsics.yaml with camera-to-base transforms as translation + quaternion.
Also outputs static_transforms.launch.py.
"""
import cv2
import numpy as np
import yaml
import math
from pathlib import Path
def rotation_matrix_to_quaternion(R: np.ndarray) -> tuple:
    """Convert a 3x3 rotation matrix to a quaternion (qx, qy, qz, qw).

    Branch selection on the largest of the trace and the diagonal
    elements keeps the square root well-conditioned (Shepperd's method).
    """
    m00, m11, m22 = R[0, 0], R[1, 1], R[2, 2]
    tr = m00 + m11 + m22
    if tr > 0:
        scale = 0.5 / math.sqrt(tr + 1.0)
        qw = 0.25 / scale
        qx = (R[2, 1] - R[1, 2]) * scale
        qy = (R[0, 2] - R[2, 0]) * scale
        qz = (R[1, 0] - R[0, 1]) * scale
    elif m00 > m11 and m00 > m22:
        scale = 2.0 * math.sqrt(1.0 + m00 - m11 - m22)
        qw = (R[2, 1] - R[1, 2]) / scale
        qx = 0.25 * scale
        qy = (R[0, 1] + R[1, 0]) / scale
        qz = (R[0, 2] + R[2, 0]) / scale
    elif m11 > m22:
        scale = 2.0 * math.sqrt(1.0 + m11 - m00 - m22)
        qw = (R[0, 2] - R[2, 0]) / scale
        qx = (R[0, 1] + R[1, 0]) / scale
        qy = 0.25 * scale
        qz = (R[1, 2] + R[2, 1]) / scale
    else:
        scale = 2.0 * math.sqrt(1.0 + m22 - m00 - m11)
        qw = (R[1, 0] - R[0, 1]) / scale
        qx = (R[0, 2] + R[2, 0]) / scale
        qy = (R[1, 2] + R[2, 1]) / scale
        qz = 0.25 * scale
    return (qx, qy, qz, qw)
def mechanical_extrinsics(
    mount_radius_m: float = 0.05,
    mount_height_m: float = 0.30,
    cam_yaw_degs: list = None,
) -> dict:
    """Camera-to-base_link transforms from the known mount geometry.

    Cameras sit on a ring of radius mount_radius_m at height
    mount_height_m, facing outward at the given yaw angles.
    Optical frame: +Z into the scene, +X right, +Y down.
    base_link: +X forward, +Y left, +Z up (ROS convention).

    Returns:
        {cam_name: {'translation': [x, y, z],
                    'rotation_quat': [qx, qy, qz, qw]}}
        as the cam_optical_frame -> base_link transform.
    """
    if cam_yaw_degs is None:
        cam_yaw_degs = [0.0, -90.0, 180.0, 90.0]  # front, right, rear, left
    names = ['front', 'right', 'rear', 'left']
    # Fixed axis remap, optical -> unrotated base_link:
    # x_cam -> -y_base, y_cam -> -z_base, z_cam -> x_base.
    optical_to_base = np.array([
        [0, 0, 1],
        [-1, 0, 0],
        [0, -1, 0],
    ])
    transforms = {}
    for cam, yaw_deg in zip(names, cam_yaw_degs):
        yaw = math.radians(yaw_deg)
        c, s = math.cos(yaw), math.sin(yaw)
        # Optical centre on the mount ring (base_link +y points left).
        tx = mount_radius_m * c
        ty = -mount_radius_m * s
        tz = mount_height_m
        # Orient the remapped frame by the camera's facing direction.
        yaw_rot = np.array([
            [c, s, 0],
            [-s, c, 0],
            [0, 0, 1],
        ])
        qx, qy, qz, qw = rotation_matrix_to_quaternion(yaw_rot @ optical_to_base)
        transforms[cam] = {
            'translation': [round(tx, 4), round(ty, 4), round(tz, 4)],
            'rotation_quat': [round(qx, 6), round(qy, 6),
                              round(qz, 6), round(qw, 6)],
        }
    return transforms
def write_extrinsics_yaml(output_path: str, extrinsics: dict,
                          mount_radius_m: float, mount_height_m: float) -> None:
    """Serialise per-camera extrinsics to extrinsics.yaml.

    Creates parent directories as needed; key order is preserved so the
    file reads top-to-bottom as metadata first, then cameras.
    """
    cameras = {
        name: {
            'parent_frame': 'base_link',
            'child_frame': f'camera_{name}_optical_frame',
            'translation': ex['translation'],
            'rotation_quat_xyzw': ex['rotation_quat'],
        }
        for name, ex in extrinsics.items()
    }
    payload = {
        '_description': 'Camera extrinsics — base_link to camera_optical_frame transforms',
        '_mount_radius_m': mount_radius_m,
        '_mount_height_m': mount_height_m,
        'cameras': cameras,
    }
    Path(output_path).parent.mkdir(parents=True, exist_ok=True)
    with open(output_path, 'w') as f:
        yaml.dump(payload, f, default_flow_style=False, sort_keys=False)
    print(f'Written: {output_path}')

View File

@ -1,215 +0,0 @@
#!/usr/bin/env python3
"""
calibrate_camera.py Run OpenCV fisheye calibration from captured images.
Usage:
python3 calibrate_camera.py \\
--images-dir /path/to/cal/cam_front \\
--output /path/to/calibration/front/camera_info.yaml \\
--camera-name camera_front \\
--board-size 8x6 \\
--square-size 0.025 \\
--image-size 640x480 \\
[--show-reprojection]
"""
import argparse
import os
import sys
from pathlib import Path
import cv2
import numpy as np
def parse_size(s: str) -> tuple:
    """Parse a 'WxH' size string into an (int width, int height) tuple.

    The separator is case-insensitive ('640x480' and '640X480' both work).

    Raises:
        argparse.ArgumentTypeError: if the string is not exactly two
            fields joined by 'x'.
    """
    fields = s.lower().split('x')
    if len(fields) != 2:
        raise argparse.ArgumentTypeError(f'Size must be WxH, got: {s}')
    width, height = (int(v) for v in fields)
    return (width, height)
def main():
    """CLI entry: run fisheye calibration over a directory of checkerboard images.

    Pipeline: parse args -> collect images -> calibrate_fisheye ->
    per-image reprojection report -> write camera_info.yaml -> optional
    interactive reprojection visualisation.
    """
    parser = argparse.ArgumentParser(
        description='Compute IMX219 fisheye camera intrinsics from checkerboard images.',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog=__doc__,
    )
    parser.add_argument(
        '--images-dir', required=True,
        help='Directory containing captured calibration images (*.png, *.jpg).',
    )
    parser.add_argument(
        '--output', required=True,
        help='Output path for camera_info.yaml.',
    )
    parser.add_argument(
        '--camera-name', required=True,
        help='Camera name for YAML header (e.g. camera_front).',
    )
    parser.add_argument(
        '--board-size', type=str, default='8x6',
        help='Checkerboard inner corners as WxH (default: 8x6).',
    )
    parser.add_argument(
        '--square-size', type=float, default=0.025,
        help='Physical square size in metres (default: 0.025 = 25mm).',
    )
    parser.add_argument(
        '--image-size', type=str, default='640x480',
        help='Image dimensions as WxH (default: 640x480).',
    )
    parser.add_argument(
        '--show-reprojection', action='store_true',
        help='Display original vs reprojected corners for each image.',
    )
    args = parser.parse_args()
    # Parse board and image sizes
    bw, bh = parse_size(args.board_size)
    board_size = (bw, bh)
    image_size = parse_size(args.image_size)
    # Collect image paths (sorted so report order is deterministic)
    images_dir = Path(args.images_dir)
    if not images_dir.exists():
        print(f'[ERROR] Images directory not found: {images_dir}')
        sys.exit(1)
    image_paths = sorted(
        [str(p) for p in images_dir.glob('*.png')] +
        [str(p) for p in images_dir.glob('*.jpg')] +
        [str(p) for p in images_dir.glob('*.jpeg')]
    )
    if not image_paths:
        print(f'[ERROR] No images found in: {images_dir}')
        sys.exit(1)
    print(f'Board size: {board_size[0]}x{board_size[1]} inner corners')
    print(f'Square size: {args.square_size * 1000:.0f}mm')
    print(f'Image size: {image_size[0]}x{image_size[1]}')
    print(f'Images found: {len(image_paths)}')
    print(f'Output: {args.output}')
    print()
    # Run calibration using camera_calibrator module
    # Add package to path for standalone use
    script_dir = Path(__file__).resolve().parent
    pkg_dir = script_dir.parent
    if str(pkg_dir) not in sys.path:
        sys.path.insert(0, str(pkg_dir))
    from saltybot_calibration.camera_calibrator import (
        calibrate_fisheye,
        write_camera_info_yaml,
        compute_reprojection_errors,
    )
    print('Processing images...')
    result = calibrate_fisheye(image_paths, board_size, args.square_size, image_size)
    K = result['K']
    D = result['D']
    rms = result['rms']
    rvecs = result['rvecs']
    tvecs = result['tvecs']
    obj_pts = result['obj_pts']
    img_pts = result['img_pts']
    n_used = result['n_images_used']
    n_skipped = result['n_images_skipped']
    # Per-image reprojection errors
    errors = compute_reprojection_errors(obj_pts, img_pts, rvecs, tvecs, K, D)
    # Summary table
    # NOTE(review): the filter below drops names of non-existent paths, which
    # is normally a no-op (glob only returns existing files). When images were
    # skipped during calibration (n_skipped > 0), this zip silently truncates
    # and errors may be attributed to the wrong filenames — confirm against
    # the used-image ordering inside calibrate_fisheye.
    print()
    print(f'{"Image":<40} {"RMS error (px)":>14}')
    print('-' * 57)
    for path, err in zip(
        [p for p in image_paths if Path(p).name not in
         {Path(p).name for p in image_paths if not Path(p).exists()}],
        errors
    ):
        flag = ' <<' if err > 2.0 else ''  # flag outliers above 2 px
        print(f' {Path(path).name:<38} {err:>14.4f}{flag}')
    print('-' * 57)
    print(f' {"Overall RMS":<38} {rms:>14.4f}')
    print(f' {"Images used":<38} {n_used:>14}')
    print(f' {"Images skipped":<38} {n_skipped:>14}')
    print()
    # Warn on high reprojection error
    if rms > 1.5:
        print(f'[WARN] RMS error {rms:.4f} px > 1.5 px threshold.')
        print(' Suggestions:')
        print(' - Remove images with corners at extreme edges (strong fisheye distortion)')
        print(' - Ensure board is flat and rigidly printed (not bent)')
        print(' - Increase n_images and vary board orientation more')
        print(' - Check that --image-size matches actual capture resolution')
    else:
        print(f'[OK] RMS error {rms:.4f} px — good calibration.')
    # Intrinsics summary
    print()
    print('Intrinsics:')
    print(f' fx={K[0,0]:.2f} fy={K[1,1]:.2f} cx={K[0,2]:.2f} cy={K[1,2]:.2f}')
    print(f' D=[{D[0,0]:.6f}, {D[1,0]:.6f}, {D[2,0]:.6f}, {D[3,0]:.6f}]')
    print()
    # Write YAML
    write_camera_info_yaml(
        output_path=args.output,
        camera_name=args.camera_name,
        image_size=image_size,
        K=K,
        D=D,
        rms=rms,
    )
    # Show reprojection visualisation if requested
    if args.show_reprojection:
        print('Showing reprojection errors. Press any key to advance, q to quit.')
        # NOTE(review): errors is zero-padded to len(image_paths); indexing
        # obj_pts/rvecs/tvecs/img_pts with i assumes image order matches the
        # used-image order — verify when n_skipped > 0.
        for i, (path, err) in enumerate(zip(
            [p for p in image_paths],
            errors + [0.0] * (len(image_paths) - len(errors))
        )):
            img = cv2.imread(path)
            if img is None:
                continue
            # Project points back
            projected, _ = cv2.fisheye.projectPoints(
                obj_pts[i] if i < len(obj_pts) else None,
                rvecs[i] if i < len(rvecs) else None,
                tvecs[i] if i < len(tvecs) else None,
                K, D
            ) if i < len(obj_pts) else (None, None)
            vis = img.copy()
            if projected is not None and i < len(img_pts):
                # Draw original corners (green)
                for pt in img_pts[i]:
                    cv2.circle(vis, tuple(pt[0].astype(int)), 5, (0, 255, 0), 2)
                # Draw reprojected corners (red)
                for pt in projected:
                    cv2.circle(vis, tuple(pt[0].astype(int)), 3, (0, 0, 255), -1)
            cv2.putText(vis, f'{Path(path).name} RMS={err:.3f}px', (10, 30),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 255, 0), 2)
            cv2.putText(vis, 'Green=detected Red=reprojected', (10, 60),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.6, (200, 200, 200), 1)
            cv2.imshow('Reprojection', vis)
            # Blocking waitKey: advance one image per keypress, q aborts.
            key = cv2.waitKey(0) & 0xFF
            if key == ord('q'):
                break
        cv2.destroyAllWindows()
    print('Calibration complete.')
# Allow running the calibration script directly from the command line.
if __name__ == '__main__':
    main()

View File

@ -1,238 +0,0 @@
#!/usr/bin/env python3
"""
calibrate_extrinsics.py Compute camera-to-base_link extrinsic transforms.
Two modes:
1. --mode mechanical: use known mount geometry (radius, height, yaw angles)
-> produces analytically exact transforms from CAD measurements
2. --mode stereo: use cv2.fisheye.stereoCalibrate on paired checkerboard images
-> requires shared-view images from adjacent camera pairs
Usage (mechanical mode, recommended first pass):
python3 calibrate_extrinsics.py \\
--mode mechanical \\
--mount-radius 0.05 \\
--mount-height 0.30 \\
--output /path/to/calibration/extrinsics.yaml \\
--generate-launch /path/to/jetson/launch/static_transforms.launch.py
"""
import argparse
import math
import sys
from pathlib import Path
def print_extrinsics_table(extrinsics: dict) -> None:
    """Dump a fixed-width table of per-camera transforms to stdout.

    Args:
        extrinsics: camera name -> {'translation': [x, y, z],
            'rotation_quat': [qx, qy, qz, qw]}.
    """
    header = (f'{"Camera":<8} {"tx":>8} {"ty":>8} {"tz":>8} '
              f'{"qx":>10} {"qy":>10} {"qz":>10} {"qw":>10}')
    print()
    print(header)
    print('-' * 86)
    for cam, xf in extrinsics.items():
        tx, ty, tz = xf['translation']
        qx, qy, qz, qw = xf['rotation_quat']
        row = (f' {cam:<6} {tx:>8.4f} {ty:>8.4f} {tz:>8.4f} '
               f'{qx:>10.6f} {qy:>10.6f} {qz:>10.6f} {qw:>10.6f}')
        print(row)
    print()
def generate_static_transforms_launch(extrinsics: dict, output_path: str) -> None:
    """Generate static_transforms.launch.py from extrinsics dict.

    Args:
        extrinsics: camera name -> {'translation': [x, y, z],
            'rotation_quat': [qx, qy, qz, qw]} (as produced by
            mechanical_extrinsics()).
        output_path: destination launch-file path; parent directories are
            created as needed.

    Fixed: removed an unused import of write_extrinsics_yaml and the
    sys.path manipulation that existed solely to satisfy it.
    """
    lines = [
        '"""',
        'static_transforms.launch.py — Auto-generated by calibrate_extrinsics.py',
        'DO NOT EDIT manually — regenerate via:',
        ' python3 calibrate_extrinsics.py --mode mechanical --generate-launch <this_file>',
        '"""',
        '',
        'from launch import LaunchDescription',
        'from launch_ros.actions import Node',
        '',
        '',
        'def generate_launch_description():',
        ' return LaunchDescription([',
    ]
    for name, ex in extrinsics.items():
        tx, ty, tz = ex['translation']
        qx, qy, qz, qw = ex['rotation_quat']
        child_frame = f'camera_{name}_optical_frame'
        # One tf2_ros static_transform_publisher Node() entry per camera.
        lines += [
            f' # camera_{name}: base_link -> {child_frame}',
            f' Node(',
            f' package=\'tf2_ros\',',
            f' executable=\'static_transform_publisher\',',
            f' name=\'tf_base_to_camera_{name}\',',
            f' arguments=[',
            f' \'{tx}\', \'{ty}\', \'{tz}\',',
            f' \'{qx}\', \'{qy}\', \'{qz}\', \'{qw}\',',
            f' \'base_link\', \'{child_frame}\',',
            f' ],',
            f' ),',
        ]
    lines += [
        ' ])',
        '',
    ]
    Path(output_path).parent.mkdir(parents=True, exist_ok=True)
    with open(output_path, 'w') as f:
        f.write('\n'.join(lines))
    print(f'Generated launch file: {output_path}')
def run_mechanical_mode(args) -> None:
    """Compute extrinsics from known mount geometry and write them out.

    Uses args.mount_radius / args.mount_height (metres) and writes
    extrinsics.yaml to args.output; optionally generates a launch file
    when args.generate_launch is set.
    """
    # Make the package importable when run as a standalone script.
    package_root = Path(__file__).resolve().parent.parent
    if str(package_root) not in sys.path:
        sys.path.insert(0, str(package_root))
    from saltybot_calibration.extrinsic_calibrator import (
        mechanical_extrinsics,
        write_extrinsics_yaml,
    )
    for line in (
        'Mode: mechanical',
        f'Mount radius: {args.mount_radius * 100:.1f}cm',
        f'Mount height: {args.mount_height * 100:.1f}cm',
        'Camera yaw angles: front=0 deg, right=-90 deg, rear=180 deg, left=90 deg',
    ):
        print(line)
    print()
    extrinsics = mechanical_extrinsics(
        mount_radius_m=args.mount_radius,
        mount_height_m=args.mount_height,
    )
    print_extrinsics_table(extrinsics)
    write_extrinsics_yaml(
        output_path=args.output,
        extrinsics=extrinsics,
        mount_radius_m=args.mount_radius,
        mount_height_m=args.mount_height,
    )
    if args.generate_launch:
        generate_static_transforms_launch(extrinsics, args.generate_launch)
def run_stereo_mode(args) -> None:
    """Stereo extrinsics calibration — not yet implemented; exits with status 1.

    Planned approach: cv2.fisheye.stereoCalibrate on shared-view
    checkerboard images from each adjacent camera pair
    ({images_dir}/pair_front_right/ etc.), chaining transforms
    cam0->cam1->cam2->cam3 and anchoring to base_link via the mechanical
    position of cam0, then refining against the mechanical prior.
    Until then, --mode mechanical gives transforms from CAD dimensions
    (typically accurate to ~5mm / ~1 deg for a rigid mount).
    """
    banner = (
        '[ERROR] Stereo mode is not yet implemented.',
        '',
        'Use --mode mechanical for now (accurate from CAD measurements).',
        '',
        'Future implementation plan:',
        ' 1. Capture simultaneous checkerboard images from adjacent camera pairs',
        ' in their overlapping FoV regions (approx 70 deg overlap per pair)',
        ' 2. For each pair: cv2.fisheye.stereoCalibrate with per-camera intrinsics',
        ' 3. Chain transforms cam0->cam1->cam2->cam3, anchor to base_link',
        ' 4. Refine against mechanical prior using least-squares optimisation',
        '',
        'Required directory structure for stereo mode:',
        ' {images_dir}/pair_front_right/ — images visible to both front+right',
        ' {images_dir}/pair_right_rear/',
        ' {images_dir}/pair_rear_left/',
        ' {images_dir}/pair_left_front/',
    )
    for msg in banner:
        print(msg)
    sys.exit(1)
def main():
    """CLI entry: dispatch to mechanical or stereo extrinsics calibration.

    Note: stereo mode currently exits via sys.exit(1) inside
    run_stereo_mode(), so the completion summary below is only reached in
    mechanical mode.
    """
    parser = argparse.ArgumentParser(
        description='Compute camera-to-base_link extrinsic transforms for 4x IMX219 cameras.',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog=__doc__,
    )
    parser.add_argument(
        '--mode', choices=['mechanical', 'stereo'], default='mechanical',
        help='Calibration mode: mechanical (from CAD) or stereo (from images). '
             'Default: mechanical.',
    )
    parser.add_argument(
        '--mount-radius', type=float, default=0.05,
        help='Camera mount ring radius in metres (default: 0.05).',
    )
    parser.add_argument(
        '--mount-height', type=float, default=0.30,
        help='Camera mount height above base_link in metres (default: 0.30).',
    )
    parser.add_argument(
        '--output', required=True,
        help='Output path for extrinsics.yaml.',
    )
    parser.add_argument(
        '--generate-launch', type=str, default=None,
        metavar='LAUNCH_FILE',
        help='Also generate static_transforms.launch.py at this path.',
    )
    # Stereo mode arguments (for future use)
    parser.add_argument(
        '--images-dir', type=str, default=None,
        help='[stereo mode] Directory containing paired calibration images.',
    )
    parser.add_argument(
        '--intrinsics-dir', type=str, default=None,
        help='[stereo mode] Directory containing per-camera camera_info.yaml files.',
    )
    args = parser.parse_args()
    print('=== IMX219 Extrinsic Calibration ===')
    print()
    if args.mode == 'mechanical':
        run_mechanical_mode(args)
    elif args.mode == 'stereo':
        run_stereo_mode(args)
    print()
    print('Extrinsics calibration complete.')
    print(f'Output: {args.output}')
    if args.generate_launch:
        print(f'Launch: {args.generate_launch}')
    print()
    print('Next steps:')
    print(' 1. Update saltybot_surround/config/surround_vision_params.yaml')
    print(' with camera_info_url paths pointing to calibrated YAMLs')
    print(' 2. Run generate_static_transforms.py (or use --generate-launch above)')
    print(' 3. Restart saltybot_cameras and saltybot_surround nodes')
# Allow running the extrinsics calibration script directly.
if __name__ == '__main__':
    main()

View File

@ -1,316 +0,0 @@
#!/usr/bin/env python3
"""
capture_calibration_images.py Capture checkerboard calibration images from IMX219 cameras.
Usage (standalone, no ROS2 required uses OpenCV VideoCapture for CSI):
python3 capture_calibration_images.py --camera front --n-images 30 --output-dir /tmp/cal
Usage (with ROS2 subscribes to image topic):
python3 capture_calibration_images.py --ros --camera front --n-images 30
Controls:
SPACE capture current frame (if checkerboard detected)
q quit / done
r reset captured images for this camera
Outputs: output_dir/cam_{name}/frame_{NNNN}.png
"""
import argparse
import os
import sys
from pathlib import Path
import cv2
import numpy as np
# ROS2 image topics per camera position (used only in --ros capture mode).
CAMERA_TOPICS = {
    'front': '/camera/front/image_raw',
    'right': '/camera/right/image_raw',
    'rear': '/camera/rear/image_raw',
    'left': '/camera/left/image_raw',
}
# Capture order used when --camera all is requested.
ALL_CAMERAS = ['front', 'right', 'rear', 'left']
def parse_board_size(s: str) -> tuple:
    """Parse 'WxH' string to (cols, rows) tuple of ints.

    Raises:
        argparse.ArgumentTypeError: if the string is not two positive
            'x'-separated integers (previously non-numeric parts fell
            through to a bare ValueError and negative sizes were accepted).
    """
    parts = s.lower().split('x')
    if len(parts) != 2:
        raise argparse.ArgumentTypeError(f'board-size must be WxH, got: {s}')
    try:
        cols, rows = int(parts[0]), int(parts[1])
    except ValueError:
        raise argparse.ArgumentTypeError(f'board-size must be WxH, got: {s}')
    if cols <= 0 or rows <= 0:
        raise argparse.ArgumentTypeError(f'board-size must be WxH, got: {s}')
    return (cols, rows)
def find_checkerboard(img_bgr: np.ndarray, board_size: tuple):
    """Detect checkerboard corners. Returns (found, corners, gray).

    Args:
        img_bgr: BGR input frame.
        board_size: (cols, rows) of INNER corners to look for.

    Returns:
        (found, corners, gray): detection flag, the corner array
        (sub-pixel refined only when found), and the grayscale image
        used for detection.
    """
    gray = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2GRAY)
    flags = cv2.CALIB_CB_ADAPTIVE_THRESH | cv2.CALIB_CB_NORMALIZE_IMAGE
    found, corners = cv2.findChessboardCorners(gray, board_size, flags)
    if found:
        # Refine to sub-pixel accuracy: 7x7 search window, stop after
        # 30 iterations or when movement falls below 0.001 px.
        criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
        corners = cv2.cornerSubPix(gray, corners, (7, 7), (-1, -1), criteria)
    return found, corners, gray
def draw_overlay(img: np.ndarray, board_size: tuple, found: bool,
                 corners, captured: int, n_images: int) -> np.ndarray:
    """Draw corner detections and status overlay onto a copy of img.

    Args:
        img: current BGR frame (not modified).
        board_size: (cols, rows) of inner corners, for drawChessboardCorners.
        found: whether a board was detected in this frame.
        corners: detected corner array (or None).
        captured: number of frames saved so far.
        n_images: capture target; reaching it triggers the DONE banner.

    Returns:
        Annotated copy of the frame.
    """
    display = img.copy()
    if found and corners is not None:
        cv2.drawChessboardCorners(display, board_size, corners, found)
    # Status bar background
    cv2.rectangle(display, (0, 0), (display.shape[1], 36), (0, 0, 0), -1)
    status_color = (0, 255, 0) if found else (0, 80, 220)
    status_text = 'BOARD DETECTED' if found else 'searching for board...'
    cv2.putText(display, status_text, (10, 24),
                cv2.FONT_HERSHEY_SIMPLEX, 0.7, status_color, 2)
    count_text = f'Captured: {captured}/{n_images} [SPACE=capture r=reset q=quit]'
    cv2.putText(display, count_text, (display.shape[1] // 2 - 10, 24),
                cv2.FONT_HERSHEY_SIMPLEX, 0.55, (200, 200, 200), 1)
    if captured >= n_images:
        # Green border + banner once the capture target is reached.
        cv2.rectangle(display, (0, 0), (display.shape[1], display.shape[0]),
                      (0, 200, 0), 8)
        cv2.putText(display, 'DONE! Press q to finish.', (60, display.shape[0] // 2),
                    cv2.FONT_HERSHEY_SIMPLEX, 1.2, (0, 255, 0), 3)
    return display
def capture_loop_opencv(cam_name: str, device: str, board_size: tuple,
                        n_images: int, output_dir: Path) -> int:
    """Capture calibration images using cv2.VideoCapture (non-ROS mode).
    Returns number of images captured.

    Interactive loop: SPACE saves the frame when a board is detected,
    'r' deletes all saved frames and restarts, 'q' aborts early.
    Frames are written to {output_dir}/cam_{cam_name}/frame_NNNN.png.
    """
    out_dir = output_dir / f'cam_{cam_name}'
    out_dir.mkdir(parents=True, exist_ok=True)
    cap = cv2.VideoCapture(device)
    if not cap.isOpened():
        print(f'[ERROR] Cannot open device: {device}')
        return 0
    # Try to set resolution
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
    captured = 0
    frame_idx = 0
    window_title = f'Calibration Capture — {cam_name} | SPACE=capture q=quit r=reset'
    cv2.namedWindow(window_title, cv2.WINDOW_NORMAL)
    print(f'\n[{cam_name.upper()}] Capturing {n_images} images. Output: {out_dir}')
    print(' Point the checkerboard at the camera, press SPACE when board is detected.')
    while captured < n_images:
        ret, frame = cap.read()
        if not ret:
            # NOTE(review): a permanently failing device makes this loop spin
            # printing warnings with no exit path besides Ctrl-C — consider a
            # retry limit.
            print('[WARN] Failed to read frame')
            continue
        found, corners, _ = find_checkerboard(frame, board_size)
        display = draw_overlay(frame, board_size, found, corners, captured, n_images)
        cv2.imshow(window_title, display)
        key = cv2.waitKey(1) & 0xFF
        if key == ord('q'):
            print(f' Quit requested. Captured {captured}/{n_images} images.')
            break
        elif key == ord('r'):
            # Remove all saved images and reset counter
            for f in out_dir.glob('frame_*.png'):
                f.unlink()
            captured = 0
            frame_idx = 0
            print(f' [RESET] Cleared captured images for {cam_name}.')
        elif key == ord(' '):
            if found:
                save_path = out_dir / f'frame_{frame_idx:04d}.png'
                cv2.imwrite(str(save_path), frame)
                captured += 1
                frame_idx += 1
                print(f' Saved ({captured}/{n_images}): {save_path.name}')
                # Brief flash to confirm capture
                flash = frame.copy()
                cv2.rectangle(flash, (0, 0), (flash.shape[1], flash.shape[0]),
                              (0, 255, 0), 12)
                cv2.imshow(window_title, flash)
                cv2.waitKey(120)
            else:
                print(' [SKIP] No checkerboard detected — move board into view.')
    cap.release()
    cv2.destroyWindow(window_title)
    print(f' Done: {captured} images saved to {out_dir}')
    return captured
def capture_loop_ros(cam_name: str, board_size: tuple,
                     n_images: int, output_dir: Path) -> int:
    """Capture calibration images via ROS2 topic subscription.
    Returns number of images captured.

    Spins rclpy on a daemon thread while the main thread runs the OpenCV
    preview/keyboard loop. Same keys as the non-ROS mode: SPACE saves,
    'r' resets, 'q' quits. Returns 0 if ROS2 is not importable.
    """
    try:
        import rclpy
        from rclpy.node import Node
        from sensor_msgs.msg import Image
        from cv_bridge import CvBridge
    except ImportError as e:
        print(f'[ERROR] ROS2 not available: {e}')
        print(' Run without --ros for standalone OpenCV mode.')
        return 0
    out_dir = output_dir / f'cam_{cam_name}'
    out_dir.mkdir(parents=True, exist_ok=True)
    topic = CAMERA_TOPICS[cam_name]
    print(f'\n[{cam_name.upper()}] Subscribing to: {topic}')
    print(f' Capturing {n_images} images. Output: {out_dir}')
    print(' Press SPACE in the preview window when the board is detected.')
    rclpy.init()
    node = Node('calibration_capture')
    bridge = CvBridge()
    # Single-element lists act as mutable closure cells shared with the
    # subscription callback (pre-`nonlocal` style).
    captured = [0]
    frame_idx = [0]
    latest_frame = [None]
    def image_callback(msg):
        # Runs on the spin thread; only stores the newest decoded frame.
        try:
            latest_frame[0] = bridge.imgmsg_to_cv2(msg, 'bgr8')
        except Exception as e:
            node.get_logger().warn(f'cv_bridge error: {e}')
    sub = node.create_subscription(Image, topic, image_callback, 1)  # noqa: F841
    window_title = f'Calibration Capture — {cam_name} (ROS2)'
    cv2.namedWindow(window_title, cv2.WINDOW_NORMAL)
    import threading
    spin_thread = threading.Thread(target=rclpy.spin, args=(node,), daemon=True)
    spin_thread.start()
    try:
        while captured[0] < n_images:
            frame = latest_frame[0]
            if frame is None:
                # No frame received yet — idle briefly and keep the UI alive.
                cv2.waitKey(50)
                continue
            found, corners, _ = find_checkerboard(frame, board_size)
            display = draw_overlay(frame, board_size, found, corners,
                                   captured[0], n_images)
            cv2.imshow(window_title, display)
            key = cv2.waitKey(1) & 0xFF
            if key == ord('q'):
                print(f' Quit. Captured {captured[0]}/{n_images}.')
                break
            elif key == ord('r'):
                for f in out_dir.glob('frame_*.png'):
                    f.unlink()
                captured[0] = 0
                frame_idx[0] = 0
                print(f' [RESET] Cleared images for {cam_name}.')
            elif key == ord(' '):
                if found:
                    save_path = out_dir / f'frame_{frame_idx[0]:04d}.png'
                    cv2.imwrite(str(save_path), frame)
                    captured[0] += 1
                    frame_idx[0] += 1
                    print(f' Saved ({captured[0]}/{n_images}): {save_path.name}')
                else:
                    print(' [SKIP] No board detected.')
    finally:
        # Tear down UI and ROS even on exceptions/quit.
        cv2.destroyWindow(window_title)
        node.destroy_node()
        rclpy.shutdown()
    print(f' Done: {captured[0]} images saved to {out_dir}')
    return captured[0]
def main():
    """CLI entry: capture checkerboard images for one or all cameras.

    Iterates the selected camera(s), running either the ROS2 or the
    direct-VideoCapture capture loop, then prints ready-to-paste
    calibrate_camera.py commands.

    Fixed: the low-image-count warning previously concatenated two adjacent
    f-strings with no separator ('...for frontcalibration may fail...').
    """
    parser = argparse.ArgumentParser(
        description='Capture checkerboard images for IMX219 fisheye calibration.',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog=__doc__,
    )
    parser.add_argument(
        '--camera', choices=['front', 'right', 'rear', 'left', 'all'],
        default='front',
        help='Which camera to capture from (default: front). Use "all" to iterate all 4.',
    )
    parser.add_argument(
        '--n-images', type=int, default=30,
        help='Number of images to capture per camera (default: 30).',
    )
    parser.add_argument(
        '--output-dir', type=str, default='/tmp/saltybot_cal',
        help='Root output directory. Images saved to {output_dir}/cam_{name}/ (default: /tmp/saltybot_cal).',
    )
    parser.add_argument(
        '--board-size', type=str, default='8x6',
        help='Checkerboard inner corners as WxH (default: 8x6).',
    )
    parser.add_argument(
        '--square-size', type=float, default=0.025,
        help='Physical square size in metres (default: 0.025 = 25mm).',
    )
    parser.add_argument(
        '--ros', action='store_true',
        help='Use ROS2 topic subscription instead of direct VideoCapture.',
    )
    parser.add_argument(
        '--device', type=str, default='/dev/video0',
        help='Video device (non-ROS mode only, default: /dev/video0).',
    )
    args = parser.parse_args()
    board_size = parse_board_size(args.board_size)
    output_dir = Path(args.output_dir)
    output_dir.mkdir(parents=True, exist_ok=True)
    cameras = ALL_CAMERAS if args.camera == 'all' else [args.camera]
    print(f'Checkerboard: {board_size[0]}x{board_size[1]} inner corners, '
          f'{args.square_size * 1000:.0f}mm squares')
    print(f'Target: {args.n_images} images per camera')
    print(f'Output: {output_dir}')
    print(f'Mode: {"ROS2 topics" if args.ros else f"VideoCapture ({args.device})"}')
    for i, cam_name in enumerate(cameras):
        if len(cameras) > 1:
            print(f'\n--- Camera {i + 1}/{len(cameras)}: {cam_name.upper()} ---')
            if i > 0:
                # Give the operator time to reposition the board between cameras.
                input(' Position the board in view of this camera, then press ENTER...')
        if args.ros:
            n_captured = capture_loop_ros(cam_name, board_size, args.n_images, output_dir)
        else:
            n_captured = capture_loop_opencv(cam_name, args.device, board_size,
                                             args.n_images, output_dir)
        if n_captured < 5:
            print(f'[WARN] Only {n_captured} images captured for {cam_name} — '
                  f'calibration may fail (need >= 5, recommend >= 20).')
    print('\nCapture complete. Run calibrate_camera.py for each camera to compute intrinsics.')
    print('Example:')
    for cam in cameras:
        print(f' python3 calibrate_camera.py \\')
        print(f' --images-dir {output_dir}/cam_{cam} \\')
        print(f' --output /mnt/nvme/saltybot/calibration/{cam}/camera_info.yaml \\')
        print(f' --camera-name camera_{cam} --board-size {args.board_size} \\')
        print(f' --square-size {args.square_size} --image-size 640x480')
# Allow running the capture script directly from the command line.
if __name__ == '__main__':
    main()

View File

@ -1,142 +0,0 @@
#!/usr/bin/env python3
"""
generate_static_transforms.py Generate static_transforms.launch.py from extrinsics.yaml.
Usage:
python3 generate_static_transforms.py \\
--extrinsics /path/to/calibration/extrinsics.yaml \\
--output /path/to/jetson/ros2_ws/src/saltybot_bringup/launch/static_transforms.launch.py
"""
import argparse
import sys
from pathlib import Path
import yaml
def load_extrinsics(yaml_path: str) -> dict:
"""Load and parse extrinsics.yaml.
Returns the 'cameras' sub-dict with per-camera transforms.
"""
with open(yaml_path, 'r') as f:
data = yaml.safe_load(f)
if 'cameras' not in data:
raise ValueError(f'extrinsics.yaml missing "cameras" key: {yaml_path}')
return data['cameras'], data
def generate_launch_content(cameras: dict) -> str:
    """Render the full source text of static_transforms.launch.py.

    Args:
        cameras: camera name -> dict with 'translation',
            'rotation_quat_xyzw', and optional 'parent_frame'/'child_frame'.

    Returns:
        The generated launch-file text (one static_transform_publisher
        Node per camera, plus a RealSense D435i placeholder at identity).
    """
    def node_block(cam_name: str, cam_data: dict) -> list:
        # Lines for one tf2_ros static_transform_publisher Node() entry.
        parent = cam_data.get('parent_frame', 'base_link')
        child = cam_data.get('child_frame', f'camera_{cam_name}_optical_frame')
        tx, ty, tz = cam_data['translation']
        qx, qy, qz, qw = cam_data['rotation_quat_xyzw']
        return [
            f' # {cam_name}: {parent} -> {child}',
            f' Node(',
            f' package=\'tf2_ros\',',
            f' executable=\'static_transform_publisher\',',
            f' name=\'tf_base_to_camera_{cam_name}\',',
            f' arguments=[',
            f' \'{tx}\', \'{ty}\', \'{tz}\',',
            f' \'{qx}\', \'{qy}\', \'{qz}\', \'{qw}\',',
            f' \'{parent}\', \'{child}\',',
            f' ],',
            f' ),',
        ]
    out = [
        '"""',
        'static_transforms.launch.py — Auto-generated by generate_static_transforms.py',
        'DO NOT EDIT manually — regenerate with:',
        ' python3 generate_static_transforms.py \\',
        ' --extrinsics /path/to/calibration/extrinsics.yaml \\',
        ' --output <this_file>',
        '"""',
        '',
        'from launch import LaunchDescription',
        'from launch_ros.actions import Node',
        '',
        '',
        'def generate_launch_description():',
        ' return LaunchDescription([',
    ]
    for cam_name, cam_data in cameras.items():
        out.extend(node_block(cam_name, cam_data))
    # RealSense D435i placeholder at identity — replace once the D435i has
    # its own calibrated extrinsics.
    out.extend([
        ' # RealSense D435i: base_link -> camera_color_optical_frame',
        ' # NOTE: Replace with actual D435i extrinsics from stereo calibration',
        ' Node(',
        ' package=\'tf2_ros\',',
        ' executable=\'static_transform_publisher\',',
        ' name=\'tf_base_to_realsense\',',
        ' arguments=[',
        ' \'0.0\', \'0.0\', \'0.25\',',
        ' \'0.0\', \'0.0\', \'0.0\', \'1.0\',',
        ' \'base_link\', \'camera_color_optical_frame\',',
        ' ],',
        ' ),',
        ' ])',
        '',
    ])
    return '\n'.join(out)
def main():
    """CLI entry: read extrinsics.yaml and emit static_transforms.launch.py."""
    parser = argparse.ArgumentParser(
        description='Generate static_transforms.launch.py from extrinsics.yaml.',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog=__doc__,
    )
    parser.add_argument(
        '--extrinsics', required=True,
        help='Path to extrinsics.yaml (produced by calibrate_extrinsics.py).',
    )
    parser.add_argument(
        '--output', required=True,
        help='Output path for static_transforms.launch.py.',
    )
    args = parser.parse_args()
    extrinsics_path = args.extrinsics
    if not Path(extrinsics_path).exists():
        print(f'[ERROR] extrinsics.yaml not found: {extrinsics_path}')
        sys.exit(1)
    print(f'Loading extrinsics: {extrinsics_path}')
    # full_data (the whole YAML document) is currently unused here.
    cameras, full_data = load_extrinsics(extrinsics_path)
    print(f'Found {len(cameras)} camera(s): {", ".join(cameras.keys())}')
    content = generate_launch_content(cameras)
    output_path = Path(args.output)
    output_path.parent.mkdir(parents=True, exist_ok=True)
    with open(output_path, 'w') as f:
        f.write(content)
    print(f'Written: {output_path}')
    print()
    # Human-readable summary table of the transforms just written.
    print('Transforms:')
    print(f' {"Camera":<8} {"Parent":<12} {"Child frame":<36} {"Translation"}')
    print(' ' + '-' * 80)
    for cam_name, cam_data in cameras.items():
        parent = cam_data.get('parent_frame', 'base_link')
        child = cam_data.get('child_frame', f'camera_{cam_name}_optical_frame')
        tx, ty, tz = cam_data['translation']
        print(f' {cam_name:<8} {parent:<12} {child:<36} '
              f'[{tx:.3f}, {ty:.3f}, {tz:.3f}]')
    print()
    print('Next step: include static_transforms.launch.py in your main launch file:')
    print('  from launch.actions import IncludeLaunchDescription')
    print('  from launch.launch_description_sources import PythonLaunchDescriptionSource')
# Allow running the generator directly from the command line.
if __name__ == '__main__':
    main()

View File

@ -1,259 +0,0 @@
#!/usr/bin/env python3
"""
preview_undistorted.py Preview undistorted IMX219 image using calibration YAML.
Usage (ROS2 mode):
python3 preview_undistorted.py \\
--camera front \\
--calibration /path/to/calibration/front/camera_info.yaml
Usage (image file mode):
python3 preview_undistorted.py \\
--image /path/to/image.png \\
--calibration /path/to/calibration/front/camera_info.yaml
"""
import argparse
import sys
from pathlib import Path
import cv2
import numpy as np
import yaml
# ROS2 image topics per camera position (used in --camera live-preview mode).
CAMERA_TOPICS = {
    'front': '/camera/front/image_raw',
    'right': '/camera/right/image_raw',
    'rear': '/camera/rear/image_raw',
    'left': '/camera/left/image_raw',
}
def load_camera_info(yaml_path: str) -> dict:
    """Parse a camera_info YAML file produced by calibrate_camera.py.

    Returns:
        dict with keys: K (3x3 camera matrix), D ((N,1) distortion
        coefficients), image_size ((width, height)), camera_name,
        distortion_model.
    """
    with open(yaml_path, 'r') as f:
        raw = yaml.safe_load(f)
    intrinsic = np.array(raw['camera_matrix']['data'], dtype=np.float64)
    dist = np.array(raw['distortion_coefficients']['data'], dtype=np.float64)
    return {
        'K': intrinsic.reshape(3, 3),
        'D': dist.reshape(-1, 1),
        'image_size': (raw['image_width'], raw['image_height']),
        'camera_name': raw.get('camera_name', 'unknown'),
        'distortion_model': raw.get('distortion_model', 'equidistant'),
    }
def build_undistort_maps(K: np.ndarray, D: np.ndarray,
                         image_size: tuple, balance: float = 0.0) -> tuple:
    """Compute fisheye undistortion maps.
    Args:
        K: 3x3 camera matrix
        D: (4,1) fisheye distortion coefficients
        image_size: (width, height)
        balance: 0.0 = no black borders (crop), 1.0 = keep all pixels (black borders)
    Returns:
        (map1, map2, new_K): remap LUTs for cv2.remap plus the new camera
        matrix estimated for the undistorted view. (Docstring previously
        claimed only (map1, map2); callers unpack three values.)
    """
    new_K = cv2.fisheye.estimateNewCameraMatrixForUndistortRectify(
        K, D, image_size, np.eye(3), balance=balance
    )
    # CV_16SC2 map type: fixed-point maps, faster remap than float maps.
    map1, map2 = cv2.fisheye.initUndistortRectifyMap(
        K, D, np.eye(3), new_K, image_size, cv2.CV_16SC2
    )
    return map1, map2, new_K
def undistort_image(img: np.ndarray, map1: np.ndarray, map2: np.ndarray) -> np.ndarray:
    """Remap a fisheye frame through precomputed undistortion LUTs."""
    remapped = cv2.remap(img, map1, map2,
                         interpolation=cv2.INTER_LINEAR,
                         borderMode=cv2.BORDER_CONSTANT)
    return remapped
def show_side_by_side(original: np.ndarray, undistorted: np.ndarray,
                      title: str = '', rms_info: str = '') -> np.ndarray:
    """Create side-by-side comparison image.

    Args:
        original: the raw fisheye frame (left panel).
        undistorted: the remapped frame (right panel); rescaled to the
            original's height if they differ.
        title: optional caption drawn bottom-left.
        rms_info: optional caption drawn bottom-right.

    Returns:
        The combined annotated BGR image.
    """
    # Resize to same height if needed (width scaled to keep aspect ratio)
    h1, w1 = original.shape[:2]
    h2, w2 = undistorted.shape[:2]
    if h1 != h2:
        undistorted = cv2.resize(undistorted, (w2 * h1 // h2, h1))
    # 4-px grey vertical divider between the two panels.
    divider = np.zeros((h1, 4, 3), dtype=np.uint8)
    divider[:] = (80, 80, 80)
    combined = np.hstack([original, divider, undistorted])
    # Labels
    cv2.putText(combined, 'ORIGINAL (fisheye)', (10, 28),
                cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 220, 0), 2)
    cv2.putText(combined, 'UNDISTORTED', (w1 + 14, 28),
                cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 220, 255), 2)
    if title:
        cv2.putText(combined, title, (10, combined.shape[0] - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (200, 200, 200), 1)
    if rms_info:
        cv2.putText(combined, rms_info, (w1 + 14, combined.shape[0] - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (200, 200, 200), 1)
    return combined
def preview_image_file(image_path: str, cal_info: dict, balance: float) -> None:
    """Show undistorted preview of a single image file.

    Args:
        image_path: path to the input image; exits with status 1 if unreadable.
        cal_info: dict from load_camera_info() (K, D, image_size, ...).
        balance: fisheye undistort balance (0.0 crop .. 1.0 keep-all).
    """
    img = cv2.imread(image_path)
    if img is None:
        print(f'[ERROR] Cannot read image: {image_path}')
        sys.exit(1)
    K = cal_info['K']
    D = cal_info['D']
    image_size = cal_info['image_size']
    name = cal_info['camera_name']
    print(f'Camera: {name}')
    print(f'Image size: {image_size[0]}x{image_size[1]}')
    print(f'fx={K[0,0]:.2f} fy={K[1,1]:.2f} cx={K[0,2]:.2f} cy={K[1,2]:.2f}')
    print(f'D={D.flatten().tolist()}')
    # Resize input if needed (maps are only valid at the calibration size)
    ih, iw = img.shape[:2]
    if (iw, ih) != image_size:
        print(f'[WARN] Image size {iw}x{ih} != calibration size {image_size}. Resizing.')
        img = cv2.resize(img, image_size)
    map1, map2, new_K = build_undistort_maps(K, D, image_size, balance=balance)
    undist = undistort_image(img, map1, map2)
    print(f'New K after undistort (balance={balance}):')
    print(f' fx={new_K[0,0]:.2f} fy={new_K[1,1]:.2f} cx={new_K[0,2]:.2f} cy={new_K[1,2]:.2f}')
    combined = show_side_by_side(img, undist, title=str(Path(image_path).name))
    cv2.imshow('Undistorted Preview (q=quit)', combined)
    print('Press any key to close...')
    # Blocking wait: any key dismisses the window.
    cv2.waitKey(0)
    cv2.destroyAllWindows()
def preview_ros_live(cam_name: str, cal_info: dict, balance: float) -> None:
    """Subscribe to ROS2 camera topic and show live undistorted preview.

    Spins rclpy on a daemon thread; the main thread runs the OpenCV
    display loop until 'q' is pressed. Exits with status 1 if ROS2 is
    not importable.
    """
    try:
        import rclpy
        from rclpy.node import Node
        from sensor_msgs.msg import Image
        from cv_bridge import CvBridge
    except ImportError as e:
        print(f'[ERROR] ROS2 not available: {e}')
        sys.exit(1)
    K = cal_info['K']
    D = cal_info['D']
    image_size = cal_info['image_size']
    topic = CAMERA_TOPICS[cam_name]
    print(f'Subscribing to: {topic}')
    print(f'Image size: {image_size[0]}x{image_size[1]}')
    print('Press q in preview window to quit.')
    # new_K is not used further here — only the remap LUTs are needed.
    map1, map2, new_K = build_undistort_maps(K, D, image_size, balance=balance)
    rclpy.init()
    node = Node('preview_undistorted')
    bridge = CvBridge()
    # Single-element list acts as a mutable closure cell shared with cb().
    latest = [None]
    def cb(msg):
        # Runs on the spin thread; stores only the newest decoded frame.
        try:
            latest[0] = bridge.imgmsg_to_cv2(msg, 'bgr8')
        except Exception as e:
            node.get_logger().warn(f'cv_bridge: {e}')
    node.create_subscription(Image, topic, cb, 1)
    import threading
    threading.Thread(target=rclpy.spin, args=(node,), daemon=True).start()
    window = f'Undistorted — {cam_name}'
    cv2.namedWindow(window, cv2.WINDOW_NORMAL)
    try:
        while True:
            frame = latest[0]
            if frame is not None:
                fh, fw = frame.shape[:2]
                if (fw, fh) != image_size:
                    frame = cv2.resize(frame, image_size)
                undist = undistort_image(frame, map1, map2)
                combined = show_side_by_side(frame, undist)
                cv2.imshow(window, combined)
            key = cv2.waitKey(1) & 0xFF
            if key == ord('q'):
                break
    finally:
        # Tear down UI and ROS even on exceptions.
        cv2.destroyAllWindows()
        node.destroy_node()
        rclpy.shutdown()
def main():
    """CLI entry point: static-image or live-ROS2 undistortion preview.

    Modes (mutually exclusive, one required):
      --image PATH    static preview of a single image file
      --camera NAME   live preview of a ROS2 camera topic

    Also requires --calibration; exits with status 1 when the calibration
    file does not exist.
    """
    parser = argparse.ArgumentParser(
        description='Preview undistorted IMX219 fisheye image from calibration YAML.',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog=__doc__,
    )
    mode_group = parser.add_mutually_exclusive_group(required=True)
    mode_group.add_argument(
        '--camera', choices=['front', 'right', 'rear', 'left'],
        help='Camera name for ROS2 live preview mode.',
    )
    mode_group.add_argument(
        '--image', type=str,
        help='Path to a single image file for static preview.',
    )
    parser.add_argument(
        '--calibration', required=True,
        help='Path to camera_info.yaml produced by calibrate_camera.py.',
    )
    parser.add_argument(
        '--balance', type=float, default=0.0,
        help='Undistort balance: 0.0=crop to valid pixels, 1.0=keep all (black borders). '
             'Default: 0.0.',
    )
    args = parser.parse_args()
    cal_path = args.calibration
    if not Path(cal_path).exists():
        print(f'[ERROR] Calibration file not found: {cal_path}')
        sys.exit(1)
    print(f'Loading calibration: {cal_path}')
    cal_info = load_camera_info(cal_path)
    if cal_info['distortion_model'] not in ('equidistant', 'fisheye'):
        # Fix: the two adjacent f-string literals previously joined with no
        # separator, printing '..."<model>"expected "equidistant"...'.
        print(f'[WARN] Distortion model "{cal_info["distortion_model"]}" — '
              f'expected "equidistant". Results may be incorrect.')
    if args.image:
        preview_image_file(args.image, cal_info, args.balance)
    else:
        preview_ros_live(args.camera, cal_info, args.balance)


if __name__ == '__main__':
    main()

View File

@ -1,4 +0,0 @@
__pycache__/
*.pyc
*.egg-info/
.pytest_cache/

View File

@ -1,58 +0,0 @@
# mode_switch_params.yaml — saltybot_mode_switch
#
# Launch with:
# ros2 launch saltybot_mode_switch mode_switch.launch.py
#
# Topic wiring:
# /rc/joy → mode_switch_node (CRSF channels)
# /saltybot/balance_state → mode_switch_node (STM32 state)
# /slam_toolbox/pose_with_covariance_stamped → mode_switch_node (SLAM fix)
# /saltybot/control_mode ← mode_switch_node (JSON mode + alpha)
# /saltybot/led_pattern ← mode_switch_node (LED name)
#
# /cmd_vel_auto → cmd_vel_mux_node (from Nav2/follower)
# /saltybot/control_mode → cmd_vel_mux_node
# /cmd_vel ← cmd_vel_mux_node (to bridge)
#
# IMPORTANT: remap Nav2 and person_follower to publish /cmd_vel_auto:
# ros2 launch saltybot_bringup nav2.launch.py cmd_vel_topic:=/cmd_vel_auto
# ── CRSF channel mapping ──────────────────────────────────────────────────────
# Joy axes index for the AUX2 / CH6 mode-toggle switch.
# Typical CRSF→Joy mapping: CH1=axes[0] … CH6=axes[5].
ch6_axis: 5
# Space-separated Joy axes indices for the four control sticks (roll/pitch/throttle/yaw).
# These are monitored for the 10% override threshold.
stick_axes: "0 1 2 3"
# ── Override / return-to-auto ──────────────────────────────────────────────────
# Stick magnitude (0–1) that triggers instant RC override while in AUTO.
# 10% (0.10) per spec — change only if stick noise causes false triggers.
override_threshold: 0.10
# Seconds sticks must remain neutral before AUTO re-engages after an override.
return_delay_s: 2.0
# ── Ramp ──────────────────────────────────────────────────────────────────────
# RC↔AUTO blend ramp duration (seconds). cmd_vel_mux scales by blend_alpha
# so autonomous commands are eased in/out over this window.
ramp_duration_s: 0.5
# ── RC link health ────────────────────────────────────────────────────────────
# Seconds of Joy silence before RC link is declared lost → force RC mode.
rc_link_timeout_s: 0.5
# ── SLAM fix ──────────────────────────────────────────────────────────────────
# ROS2 topic providing the SLAM localisation pose. Any message received within
# slam_fix_timeout_s counts as a valid fix.
slam_fix_topic: "/slam_toolbox/pose_with_covariance_stamped"
# Seconds after the last SLAM pose before fix is considered lost.
slam_fix_timeout_s: 3.0
# ── Control loop ──────────────────────────────────────────────────────────────
control_rate: 20.0 # Hz
# ── cmd_vel_mux ───────────────────────────────────────────────────────────────
cmd_vel_timeout_s: 0.5 # deadman: zero output if /cmd_vel_auto goes silent

View File

@ -1,59 +0,0 @@
"""mode_switch.launch.py — launch mode_switch_node + cmd_vel_mux_node."""
import os
from ament_index_python.packages import get_package_share_directory
from launch import LaunchDescription
from launch.actions import DeclareLaunchArgument
from launch.substitutions import LaunchConfiguration
from launch_ros.actions import Node
def generate_launch_description():
    """Launch mode_switch_node and cmd_vel_mux_node with shared parameters.

    Declares overridable launch arguments (params_file, ramp_duration_s,
    return_delay_s, slam_fix_topic). For mode_switch_node the individual
    argument overrides are layered ON TOP of the YAML params file, so a
    CLI override beats the file value.
    """
    pkg_share = get_package_share_directory("saltybot_mode_switch")
    default_cfg = os.path.join(pkg_share, "config", "mode_switch_params.yaml")
    return LaunchDescription([
        DeclareLaunchArgument(
            "params_file",
            default_value=default_cfg,
            description="Path to mode_switch_params.yaml",
        ),
        DeclareLaunchArgument(
            "ramp_duration_s",
            default_value="0.5",
            description="RC↔AUTO blend ramp duration (seconds)",
        ),
        DeclareLaunchArgument(
            "return_delay_s",
            default_value="2.0",
            description="Sticks-neutral duration before auto re-engage (seconds)",
        ),
        DeclareLaunchArgument(
            "slam_fix_topic",
            default_value="/slam_toolbox/pose_with_covariance_stamped",
            description="SLAM fix pose topic",
        ),
        Node(
            package="saltybot_mode_switch",
            executable="mode_switch_node",
            name="mode_switch",
            output="screen",
            # Later entries win: per-argument overrides beat the YAML file.
            parameters=[
                LaunchConfiguration("params_file"),
                {
                    "ramp_duration_s": LaunchConfiguration("ramp_duration_s"),
                    "return_delay_s": LaunchConfiguration("return_delay_s"),
                    "slam_fix_topic": LaunchConfiguration("slam_fix_topic"),
                },
            ],
        ),
        Node(
            package="saltybot_mode_switch",
            executable="cmd_vel_mux_node",
            name="cmd_vel_mux",
            output="screen",
            parameters=[LaunchConfiguration("params_file")],
        ),
    ])

View File

@ -1,31 +0,0 @@
<?xml version="1.0"?>
<?xml-model href="http://download.ros.org/schema/package_format3.xsd" schematypens="http://www.w3.org/2001/XMLSchema"?>
<package format="3">
<name>saltybot_mode_switch</name>
<version>0.1.0</version>
<description>
Autonomous/RC mode switch for saltybot (Issue #104).
Monitors CRSF CH6 toggle, RC stick override, SLAM fix status, and RC link
health to manage RC↔AUTO transitions with a 500 ms blend ramp.
Publishes /saltybot/control_mode (JSON) and /saltybot/led_pattern.
Includes cmd_vel_mux_node that scales autonomous cmd_vel by blend_alpha.
</description>
<maintainer email="sl-controls@saltylab.local">sl-controls</maintainer>
<license>MIT</license>
<depend>rclpy</depend>
<depend>geometry_msgs</depend>
<depend>sensor_msgs</depend>
<depend>std_msgs</depend>
<buildtool_depend>ament_python</buildtool_depend>
<test_depend>ament_copyright</test_depend>
<test_depend>ament_flake8</test_depend>
<test_depend>ament_pep257</test_depend>
<test_depend>python3-pytest</test_depend>
<export>
<build_type>ament_python</build_type>
</export>
</package>

View File

@ -1,133 +0,0 @@
"""
cmd_vel_mux_node.py Blend-alpha cmd_vel multiplexer (Issue #104).
Sits between the Nav2 / person-follower stack and the cmd_vel_bridge_node,
scaling the autonomous cmd_vel output by the blend_alpha published by
mode_switch_node so that the RC AUTO handoff is smooth over 500 ms.
Topic graph
-----------
Nav2 / follower → /cmd_vel_auto → cmd_vel_mux → /cmd_vel → bridge
mode_switch → /saltybot/control_mode (blend_alpha) → cmd_vel_mux
In RC mode (blend_alpha ≈ 0) the node publishes Twist(0,0) so the bridge
receives zeros — this is harmless because the bridge's mode gate already
prevents autonomous commands when the STM32 is in RC_MANUAL.
The bridge's existing ESC ramp handles hardware-level smoothing;
the blend_alpha here provides the higher-level cmd_vel policy ramp.
Parameters
----------
cmd_vel_timeout_s Deadman: zero output if /cmd_vel_auto goes silent (0.5 s)
control_rate Output publish rate (Hz) (20.0)
"""
import json
import time
import rclpy
from rclpy.node import Node
from rclpy.qos import HistoryPolicy, QoSProfile, ReliabilityPolicy
from geometry_msgs.msg import Twist
from std_msgs.msg import String
from saltybot_mode_switch.mode_logic import blend_twist
_BIG = 1e9
class CmdVelMuxNode(Node):
    """Blend-alpha cmd_vel multiplexer (see module docstring).

    Subscribes to /cmd_vel_auto and /saltybot/control_mode, scales the
    autonomous Twist by blend_alpha, and republishes on /cmd_vel at
    control_rate with a deadman timeout on the autonomous source.
    """
    def __init__(self):
        super().__init__("cmd_vel_mux")
        self.declare_parameter("cmd_vel_timeout_s", 0.5)
        self.declare_parameter("control_rate", 20.0)
        self._timeout = self.get_parameter("cmd_vel_timeout_s").value
        self._rate = self.get_parameter("control_rate").value
        reliable = QoSProfile(
            reliability=ReliabilityPolicy.RELIABLE,
            history=HistoryPolicy.KEEP_LAST,
            depth=10,
        )
        # ── State ─────────────────────────────────────────────────────────────
        self._auto_linear_x: float = 0.0
        self._auto_angular_z: float = 0.0
        self._blend_alpha: float = 0.0
        self._last_auto_t: float = 0.0   # monotonic; 0.0 = never received
        # ── Subscriptions ─────────────────────────────────────────────────────
        self.create_subscription(
            Twist, "/cmd_vel_auto", self._auto_cb, reliable)
        self.create_subscription(
            String, "/saltybot/control_mode", self._mode_cb, reliable)
        # ── Publisher ─────────────────────────────────────────────────────────
        self._pub = self.create_publisher(Twist, "/cmd_vel", reliable)
        # ── Timer ─────────────────────────────────────────────────────────────
        self._timer = self.create_timer(1.0 / self._rate, self._publish_cb)
        self.get_logger().info(
            f"CmdVelMuxNode ready rate={self._rate:.0f}Hz "
            f"timeout={self._timeout:.1f}s"
        )
    # ── Callbacks ─────────────────────────────────────────────────────────────
    def _auto_cb(self, msg: Twist) -> None:
        # Latch the most recent autonomous command and timestamp it for the
        # deadman check in _publish_cb().
        self._auto_linear_x = msg.linear.x
        self._auto_angular_z = msg.angular.z
        self._last_auto_t = time.monotonic()
    def _mode_cb(self, msg: String) -> None:
        # Malformed JSON fails safe: alpha drops to 0 (no autonomous motion).
        try:
            data = json.loads(msg.data)
            self._blend_alpha = float(data.get("blend_alpha", 0.0))
        except (json.JSONDecodeError, TypeError, ValueError):
            self._blend_alpha = 0.0
    # ── 20 Hz publish ─────────────────────────────────────────────────────────
    def _publish_cb(self) -> None:
        now = time.monotonic()
        # Deadman: zero if autonomous source went silent
        auto_age = (now - self._last_auto_t) if self._last_auto_t > 0.0 else _BIG
        if auto_age > self._timeout:
            linear_x, angular_z = 0.0, 0.0
        else:
            linear_x, angular_z = blend_twist(
                self._auto_linear_x,
                self._auto_angular_z,
                self._blend_alpha,
            )
        twist = Twist()
        twist.linear.x = linear_x
        twist.angular.z = angular_z
        self._pub.publish(twist)
# ── Entry point ───────────────────────────────────────────────────────────────
def main(args=None):
    """Spin CmdVelMuxNode until interrupted, then shut down cleanly."""
    rclpy.init(args=args)
    mux = CmdVelMuxNode()
    try:
        rclpy.spin(mux)
    except KeyboardInterrupt:
        pass  # Ctrl-C is the normal way to stop the node
    finally:
        mux.destroy_node()
        rclpy.try_shutdown()


if __name__ == "__main__":
    main()

View File

@ -1,241 +0,0 @@
"""
mode_logic.py Pure RC/Autonomous mode-switch logic for saltybot.
All functions are stateless or operate on plain Python types so the full
state machine can be exercised in unit tests without a ROS2 runtime.
Mode vocabulary
---------------
"RC"           STM32 executing RC pilot commands; Jetson cmd_vel blocked.
"RAMP_TO_AUTO" Transitioning RC→AUTO; blend_alpha 0.0→1.0 over ramp_s.
"AUTO"         STM32 executing Jetson cmd_vel; RC sticks idle.
"RAMP_TO_RC"   Transitioning AUTO→RC; blend_alpha 1.0→0.0 over ramp_s.
Blend alpha
-----------
The node publishes blend_alpha [0, 1] in /saltybot/control_mode.
The cmd_vel_mux_node scales autonomous cmd_vel by alpha before publishing
to /cmd_vel (the bridge input). alpha=0 no autonomous motion.
RC override
-----------
Any stick magnitude > override_threshold while in AUTO instantly forces RC
(alpha 0 immediately; no ramp). _override_active is set so the
return-to-auto path requires sticks to be neutral for return_delay seconds.
CH6 / toggle
------------
ch6_auto=True means the pilot has requested AUTO (switch up/high).
ch6_auto=False means the pilot has requested RC (switch down/low).
Pure CH6 transitions (no stick activity) always ramp gracefully.
Safety interlocks
-----------------
No AUTO without slam_ok (valid SLAM fix received recently).
Instant drop to RC when rc_link_ok goes False (RC link lost).
These checks are applied at every control tick.
"""
import math
# ─── Constants ──────────────────────────────────────────────────────────────
# Ordered mode vocabulary; step_state() only ever returns members of MODES.
MODES = ("RC", "RAMP_TO_AUTO", "AUTO", "RAMP_TO_RC")
# Operating-state → LED pattern name, consumed by the LED controller node.
LED_PATTERNS = {
    "rc_normal": "solid_yellow",
    "rc_no_slam": "solid_yellow",  # no slam needed in RC
    "auto_ok": "solid_green",
    "auto_no_slam": "blink_orange_fast",  # shouldn't happen (interlock), but defensive
    "ramp_to_auto": "blink_green_slow",
    "ramp_to_rc": "blink_yellow_slow",
    "no_rc_link": "blink_red_fast",
}
# ─── Stick / CH6 helpers ────────────────────────────────────────────────────
def ch6_wants_auto(axes: list, ch6_idx: int, threshold: float = 0.5) -> bool:
    """
    Report whether the CH6 aux switch is in the AUTO position.

    Typical CRSF→Joy mapping: AUX2 = axes[5]. Switch up (high) = +1.0
    requests AUTO; switch down (low) = -1.0 requests RC. The default
    threshold of 0.5 (strictly exceeded) gives hysteresis against
    mid-travel noise. An out-of-range index reads as RC (False).
    """
    in_range = 0 <= ch6_idx < len(axes)
    return in_range and float(axes[ch6_idx]) > threshold
def sticks_active(
    axes: list,
    stick_indices: tuple,
    threshold: float = 0.10,
) -> bool:
    """
    Return True when any monitored stick axis exceeds threshold magnitude.

    stick_indices: tuple of Joy axes indices for the four sticks
                   (roll, pitch, throttle, yaw). Default CRSF mapping:
                   (0, 1, 2, 3).
    threshold:     10 % of full deflection (issue spec); strictly exceeded.
    """
    # Guard BOTH ends of the range: the previous `i < len(axes)` let a
    # negative index through, which silently wraps (Python negative
    # indexing) and reads the wrong channel. Out-of-range indices are
    # skipped rather than raising.
    return any(
        abs(float(axes[i])) > threshold
        for i in stick_indices
        if 0 <= i < len(axes)
    )
# ─── Alpha / ramp helpers ───────────────────────────────────────────────────
def ramp_alpha(elapsed_s: float, ramp_s: float) -> float:
    """
    Compute forward blend alpha in [0, 1] linearly over ramp_s seconds.

    elapsed_s : seconds since ramp start; values < 0 are clamped to 0
                (previously a negative elapsed leaked a negative alpha).
    ramp_s    : total ramp duration (seconds); non-positive → instant (1.0).
    """
    if ramp_s <= 0.0:
        return 1.0
    # Clamp both ends so clock skew can never push alpha out of range.
    return max(0.0, min(1.0, elapsed_s / ramp_s))
def alpha_for_mode(mode: str, elapsed_s: float, ramp_s: float) -> float:
    """
    Blend alpha for the given mode.

    RC           → 0.0
    AUTO         → 1.0
    RAMP_TO_AUTO → rises 0→1 linearly with elapsed_s
    RAMP_TO_RC   → falls 1→0 linearly with elapsed_s
    Unknown mode → 0.0 (safe: no autonomous motion)
    """
    fixed = {"RC": 0.0, "AUTO": 1.0}
    if mode in fixed:
        return fixed[mode]
    if mode == "RAMP_TO_AUTO":
        return ramp_alpha(elapsed_s, ramp_s)
    if mode == "RAMP_TO_RC":
        return 1.0 - ramp_alpha(elapsed_s, ramp_s)
    return 0.0
# ─── LED pattern ────────────────────────────────────────────────────────────
def led_pattern(mode: str, slam_ok: bool, rc_link_ok: bool) -> str:
    """Map the operating state to an LED pattern name.

    RC-link loss dominates every mode. AUTO distinguishes the healthy case
    from the (defensive, interlock-guarded) no-SLAM case. Unknown modes
    fall back to the alarm pattern.
    """
    if not rc_link_ok:
        return LED_PATTERNS["no_rc_link"]
    if mode in ("RC", "RAMP_TO_RC"):
        return LED_PATTERNS["rc_normal"]
    if mode == "RAMP_TO_AUTO":
        return LED_PATTERNS["ramp_to_auto"]
    if mode == "AUTO":
        key = "auto_ok" if slam_ok else "auto_no_slam"
        return LED_PATTERNS[key]
    return LED_PATTERNS["no_rc_link"]
# ─── State machine step ──────────────────────────────────────────────────────
def step_state(
    current_mode: str,
    ramp_elapsed_s: float,
    ramp_s: float,
    ch6_auto: bool,
    sticks_now_active: bool,
    sticks_neutral_elapsed_s: float,
    return_delay_s: float,
    slam_ok: bool,
    rc_link_ok: bool,
    override_active: bool,
) -> tuple:
    """
    Advance the RC/AUTO state machine by one control tick.

    Parameters
    ----------
    current_mode            : current mode string (one of MODES)
    ramp_elapsed_s          : seconds since last ramp start
    ramp_s                  : configured ramp duration
    ch6_auto                : True = pilot switch requests AUTO
    sticks_now_active       : True = at least one stick > override_threshold
    sticks_neutral_elapsed_s: seconds since all sticks went neutral (0 if still active)
    return_delay_s          : seconds of stick-neutral required to re-engage AUTO
    slam_ok                 : True = SLAM fix is valid
    rc_link_ok              : True = CRSF link alive
    override_active         : True = currently in stick-override RC

    Returns
    -------
    (next_mode, next_override_active, instant_to_rc)
    instant_to_rc is True when dropping to RC without a ramp
    (RC-link loss, stick override, or unknown-state fallback).

    Cleanup vs. original: removed the write-only local `instant_to_rc = False`
    and a dead `next_override = True` assignment immediately before a literal
    return — behavior is unchanged.
    """
    # ── Safety: RC link lost → instant RC, override latch cleared ────────────
    if not rc_link_ok:
        return "RC", False, True
    next_override = override_active
    # ── Stick input while AUTO or any transition → instant RC override ──────
    if sticks_now_active and current_mode in ("AUTO", "RAMP_TO_AUTO", "RAMP_TO_RC"):
        return "RC", True, True
    if not sticks_now_active and sticks_neutral_elapsed_s >= return_delay_s:
        # Sticks have been neutral long enough — clear override
        next_override = False
    # ── Ramp completion ───────────────────────────────────────────────────────
    ramp_done = ramp_elapsed_s >= ramp_s - 1e-9  # epsilon guards float accumulation
    # ── Transitions per state ─────────────────────────────────────────────────
    if current_mode == "RC":
        if ch6_auto and slam_ok and not next_override:
            return "RAMP_TO_AUTO", next_override, False
        return "RC", next_override, False
    if current_mode == "RAMP_TO_AUTO":
        if not ch6_auto:
            return "RAMP_TO_RC", next_override, False
        if not slam_ok:
            return "RAMP_TO_RC", next_override, False
        if ramp_done:
            return "AUTO", next_override, False
        return "RAMP_TO_AUTO", next_override, False
    if current_mode == "AUTO":
        if not ch6_auto:
            return "RAMP_TO_RC", next_override, False
        if not slam_ok:
            return "RAMP_TO_RC", next_override, False
        return "AUTO", next_override, False
    if current_mode == "RAMP_TO_RC":
        if ch6_auto and slam_ok and not next_override:
            return "RAMP_TO_AUTO", next_override, False
        if ramp_done:
            return "RC", next_override, False
        return "RAMP_TO_RC", next_override, False
    return "RC", next_override, True  # unknown state → safe fallback
# ─── cmd_vel blending ───────────────────────────────────────────────────────
def blend_twist(
    auto_linear_x: float,
    auto_angular_z: float,
    alpha: float,
) -> tuple:
    """
    Scale an autonomous (linear_x, angular_z) command by the blend alpha.

    alpha is clamped to [0, 1]: 0 → full stop, 1 → unmodified command.
    Returns the blended (linear_x, angular_z) pair.
    """
    if alpha < 0.0:
        alpha = 0.0
    elif alpha > 1.0:
        alpha = 1.0
    return (auto_linear_x * alpha, auto_angular_z * alpha)

View File

@ -1,307 +0,0 @@
"""
mode_switch_node.py Autonomous/RC mode switch for saltybot (Issue #104).
Inputs
------
/rc/joy (sensor_msgs/Joy)
CRSF channel axes:
axes[ch6_axis] AUX2 mode toggle (+1=AUTO, -1=RC)
axes[stick_axes...] Roll/Pitch/Throttle/Yaw override detection
/saltybot/balance_state (std_msgs/String JSON)
Parsed for RC link health (field "rc_link") and STM32 mode.
<slam_fix_topic> (geometry_msgs/PoseWithCovarianceStamped)
Any message received within slam_fix_timeout_s SLAM fix valid.
Outputs
-------
/saltybot/control_mode (std_msgs/String JSON)
{
"mode": "RC" | "RAMP_TO_AUTO" | "AUTO" | "RAMP_TO_RC",
"blend_alpha": 0.0 1.0,
"slam_ok": bool,
"rc_link_ok": bool,
"override_active": bool
}
/saltybot/led_pattern (std_msgs/String)
LED pattern name consumed by the LED controller node.
State machine
-------------
See mode_logic.py for the full transition table.
RC            pilot in control; blend_alpha = 0.0
RAMP_TO_AUTO  transitioning; blend_alpha 0→1 over ramp_duration_s (500 ms)
AUTO          Jetson in control; blend_alpha = 1.0
RAMP_TO_RC    transitioning; blend_alpha 1→0 over ramp_duration_s (500 ms)
Safety interlocks
-----------------
Any stick > override_threshold while AUTO/RAMP_TO_AUTO → instant RC.
rc_link lost (Joy silent > rc_link_timeout_s) → instant RC.
AUTO is never entered without a valid SLAM fix.
SLAM fix lost while AUTO → RAMP_TO_RC.
Parameters
----------
ch6_axis Joy axes index for the mode-toggle switch (default 5)
stick_axes Space-separated Joy indices for 4 sticks (default "0 1 2 3")
override_threshold Stick magnitude for instant RC override (default 0.10)
return_delay_s Sticks-neutral duration before re-engage (default 2.0 s)
ramp_duration_s RCAUTO blend ramp duration (default 0.5 s)
rc_link_timeout_s Joy silence threshold RC link lost (default 0.5 s)
slam_fix_topic Pose topic for SLAM fix detection (default /slam_toolbox/pose_with_covariance_stamped)
slam_fix_timeout_s Pose silence threshold SLAM fix lost (default 3.0 s)
control_rate State machine tick rate (Hz) (default 20.0)
"""
import json
import time
import rclpy
from rclpy.node import Node
from rclpy.qos import HistoryPolicy, QoSProfile, ReliabilityPolicy, DurabilityPolicy
from geometry_msgs.msg import PoseWithCovarianceStamped
from sensor_msgs.msg import Joy
from std_msgs.msg import String
from saltybot_mode_switch.mode_logic import (
alpha_for_mode,
ch6_wants_auto,
led_pattern,
step_state,
sticks_active,
)
_BIG = 1e9 # sentinel: "never received"
class ModeSwitchNode(Node):
    """RC/AUTO mode-switch state machine node (see module docstring).

    Subscribes to /rc/joy, /saltybot/balance_state and the SLAM pose topic,
    steps the pure state machine in mode_logic.step_state() at control_rate,
    and publishes /saltybot/control_mode (JSON) and /saltybot/led_pattern.
    """
    def __init__(self):
        super().__init__("mode_switch")
        # ── Parameters ────────────────────────────────────────────────────────
        self.declare_parameter("ch6_axis", 5)
        self.declare_parameter("stick_axes", "0 1 2 3")
        self.declare_parameter("override_threshold", 0.10)
        self.declare_parameter("return_delay_s", 2.0)
        self.declare_parameter("ramp_duration_s", 0.5)
        self.declare_parameter("rc_link_timeout_s", 0.5)
        self.declare_parameter("slam_fix_topic",
                               "/slam_toolbox/pose_with_covariance_stamped")
        self.declare_parameter("slam_fix_timeout_s", 3.0)
        self.declare_parameter("control_rate", 20.0)
        self._reload_params()
        # ── State ─────────────────────────────────────────────────────────────
        self._mode: str = "RC"
        self._override_active: bool = False
        self._ramp_start: float = 0.0  # monotonic time ramp began
        self._sticks_neutral_since: float | None = None
        self._last_joy_t: float = 0.0  # monotonic; 0 = never
        self._last_slam_t: float = 0.0
        self._joy_axes: list = []
        self._stm32_mode: int = 0  # from balance_state JSON
        # ── QoS ───────────────────────────────────────────────────────────────
        best_effort = QoSProfile(
            reliability=ReliabilityPolicy.BEST_EFFORT,
            history=HistoryPolicy.KEEP_LAST,
            depth=5,
        )
        reliable = QoSProfile(
            reliability=ReliabilityPolicy.RELIABLE,
            history=HistoryPolicy.KEEP_LAST,
            depth=10,
        )
        # SLAM toolbox uses transient_local for its pose topic
        transient = QoSProfile(
            reliability=ReliabilityPolicy.RELIABLE,
            history=HistoryPolicy.KEEP_LAST,
            durability=DurabilityPolicy.TRANSIENT_LOCAL,
            depth=1,
        )
        # ── Subscriptions ─────────────────────────────────────────────────────
        self.create_subscription(Joy, "/rc/joy", self._joy_cb, best_effort)
        self.create_subscription(
            String, "/saltybot/balance_state", self._balance_cb, reliable)
        slam_topic = self.get_parameter("slam_fix_topic").value
        # Try transient_local first (SLAM Toolbox); fall back gracefully if not
        try:
            self.create_subscription(
                PoseWithCovarianceStamped, slam_topic,
                self._slam_cb, transient)
        except Exception:
            self.create_subscription(
                PoseWithCovarianceStamped, slam_topic,
                self._slam_cb, reliable)
        # ── Publishers ────────────────────────────────────────────────────────
        self._mode_pub = self.create_publisher(
            String, "/saltybot/control_mode", reliable)
        self._led_pub = self.create_publisher(
            String, "/saltybot/led_pattern", reliable)
        # ── Control timer ─────────────────────────────────────────────────────
        self._timer = self.create_timer(
            1.0 / self._p["control_rate"], self._tick)
        self.get_logger().info(
            f"ModeSwitchNode ready "
            f"ramp={self._p['ramp_duration_s']:.1f}s "
            f"override_thresh={self._p['override_threshold']:.0%} "
            f"return_delay={self._p['return_delay_s']:.1f}s "
            f"slam_topic={slam_topic}"
        )
    # ── Parameter reload ──────────────────────────────────────────────────────
    def _reload_params(self):
        # Snapshot all parameters into a plain dict so the tick path does a
        # single dict lookup per value instead of ROS parameter queries.
        raw_stick_axes = self.get_parameter("stick_axes").value
        self._p = {
            "ch6_axis": int(self.get_parameter("ch6_axis").value),
            "stick_indices": tuple(int(x) for x in raw_stick_axes.split()),
            "override_threshold": float(self.get_parameter("override_threshold").value),
            "return_delay_s": float(self.get_parameter("return_delay_s").value),
            "ramp_duration_s": float(self.get_parameter("ramp_duration_s").value),
            "rc_link_timeout_s": float(self.get_parameter("rc_link_timeout_s").value),
            "slam_fix_timeout_s": float(self.get_parameter("slam_fix_timeout_s").value),
            "control_rate": float(self.get_parameter("control_rate").value),
        }
    # ── Callbacks ─────────────────────────────────────────────────────────────
    def _joy_cb(self, msg: Joy) -> None:
        # Latch axes and timestamp; RC link health is derived from the age
        # of this timestamp in _tick().
        self._joy_axes = list(msg.axes)
        self._last_joy_t = time.monotonic()
    def _balance_cb(self, msg: String) -> None:
        # Malformed JSON is ignored; the last known STM32 mode is retained.
        try:
            data = json.loads(msg.data)
            # "mode" is a label string; map back to int for reference
            mode_label = data.get("mode", "RC_MANUAL")
            self._stm32_mode = {"RC_MANUAL": 0, "RC_ASSISTED": 1,
                                "AUTONOMOUS": 2}.get(mode_label, 0)
        except (json.JSONDecodeError, TypeError):
            pass
    def _slam_cb(self, msg: PoseWithCovarianceStamped) -> None:
        # Any pose message counts as a valid fix; only its age matters.
        self._last_slam_t = time.monotonic()
    # ── 20 Hz control tick ────────────────────────────────────────────────────
    def _tick(self) -> None:
        # NOTE(review): parameters are re-read every tick (20 Hz) —
        # presumably intentional to pick up live `ros2 param set` changes;
        # confirm before optimizing this away.
        self._reload_params()
        now = time.monotonic()
        p = self._p
        # ── Derive inputs ─────────────────────────────────────────────────────
        joy_age = (now - self._last_joy_t) if self._last_joy_t > 0.0 else _BIG
        slam_age = (now - self._last_slam_t) if self._last_slam_t > 0.0 else _BIG
        rc_link_ok = joy_age <= p["rc_link_timeout_s"]
        slam_ok = slam_age <= p["slam_fix_timeout_s"]
        axes = self._joy_axes
        ch6 = ch6_wants_auto(axes, p["ch6_axis"])
        active = sticks_active(axes, p["stick_indices"], p["override_threshold"])
        # Track neutral timer
        if active:
            self._sticks_neutral_since = None
        elif self._sticks_neutral_since is None:
            self._sticks_neutral_since = now
        neutral_elapsed = (
            (now - self._sticks_neutral_since)
            if self._sticks_neutral_since is not None else 0.0
        )
        ramp_elapsed = now - self._ramp_start
        # ── State machine step ────────────────────────────────────────────────
        prev_mode = self._mode
        next_mode, next_override, instant = step_state(
            current_mode=self._mode,
            ramp_elapsed_s=ramp_elapsed,
            ramp_s=p["ramp_duration_s"],
            ch6_auto=ch6,
            sticks_now_active=active,
            sticks_neutral_elapsed_s=neutral_elapsed,
            return_delay_s=p["return_delay_s"],
            slam_ok=slam_ok,
            rc_link_ok=rc_link_ok,
            override_active=self._override_active,
        )
        if next_mode != prev_mode:
            # Log transitions; reset ramp timer on ramp entry
            self.get_logger().info(
                f"Mode: {prev_mode}{next_mode}"
                f" slam_ok={slam_ok} rc_link={rc_link_ok}"
                f" override={next_override}"
            )
            if next_mode in ("RAMP_TO_AUTO", "RAMP_TO_RC"):
                self._ramp_start = now
                ramp_elapsed = 0.0
        self._mode = next_mode
        self._override_active = next_override
        # ── Compute blend alpha ───────────────────────────────────────────────
        # Recomputed from _ramp_start so a ramp started THIS tick yields the
        # correct alpha=start-of-ramp value.
        current_ramp_elapsed = now - self._ramp_start
        alpha = alpha_for_mode(next_mode, current_ramp_elapsed, p["ramp_duration_s"])
        if instant:
            alpha = 0.0
        # ── Log steady warnings ───────────────────────────────────────────────
        if next_mode == "AUTO" and not slam_ok:
            self.get_logger().warn(
                "In AUTO but SLAM fix lost — will exit AUTO",
                throttle_duration_sec=2.0)
        if not rc_link_ok:
            self.get_logger().warn(
                "RC link lost — forced RC mode",
                throttle_duration_sec=1.0)
        # ── Publish /saltybot/control_mode ────────────────────────────────────
        payload = {
            "mode": next_mode,
            "blend_alpha": round(alpha, 4),
            "slam_ok": slam_ok,
            "rc_link_ok": rc_link_ok,
            "override_active": next_override,
        }
        mode_msg = String()
        mode_msg.data = json.dumps(payload)
        self._mode_pub.publish(mode_msg)
        # ── Publish /saltybot/led_pattern ─────────────────────────────────────
        pattern = led_pattern(next_mode, slam_ok, rc_link_ok)
        led_msg = String()
        led_msg.data = pattern
        self._led_pub.publish(led_msg)
# ── Entry point ───────────────────────────────────────────────────────────────
def main(args=None):
    """Initialise rclpy, spin ModeSwitchNode, and tear down on exit."""
    rclpy.init(args=args)
    switch_node = ModeSwitchNode()
    try:
        rclpy.spin(switch_node)
    except KeyboardInterrupt:
        pass  # clean exit on Ctrl-C
    finally:
        switch_node.destroy_node()
        rclpy.try_shutdown()


if __name__ == "__main__":
    main()

View File

@ -1,5 +0,0 @@
[develop]
script_dir=$base/lib/saltybot_mode_switch
[install]
install_scripts=$base/lib/saltybot_mode_switch

View File

@ -1,33 +0,0 @@
from setuptools import setup, find_packages
import os
from glob import glob
package_name = "saltybot_mode_switch"
setup(
    name=package_name,
    version="0.1.0",
    packages=find_packages(exclude=["test"]),
    # ament resource index marker + package.xml + installed config/launch files
    data_files=[
        ("share/ament_index/resource_index/packages",
         [f"resource/{package_name}"]),
        (f"share/{package_name}", ["package.xml"]),
        (os.path.join("share", package_name, "config"),
         glob("config/*.yaml")),
        (os.path.join("share", package_name, "launch"),
         glob("launch/*.py")),
    ],
    install_requires=["setuptools"],
    zip_safe=True,
    maintainer="sl-controls",
    maintainer_email="sl-controls@saltylab.local",
    description="Autonomous/RC mode switch with 500 ms blend ramp (Issue #104)",
    license="MIT",
    tests_require=["pytest"],
    # `ros2 run saltybot_mode_switch <name>` executables for both nodes
    entry_points={
        "console_scripts": [
            f"mode_switch_node = {package_name}.mode_switch_node:main",
            f"cmd_vel_mux_node = {package_name}.cmd_vel_mux_node:main",
        ],
    },
)

View File

@ -1,481 +0,0 @@
"""
test_mode_logic.py Unit tests for saltybot_mode_switch pure logic.
Covers:
- ch6_wants_auto / sticks_active helpers
- ramp_alpha / alpha_for_mode
- led_pattern
- step_state state machine (all transitions)
- blend_twist
No ROS2 runtime required.
"""
import sys
import os
import math
sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))
import pytest
from saltybot_mode_switch.mode_logic import (
alpha_for_mode,
blend_twist,
ch6_wants_auto,
led_pattern,
ramp_alpha,
step_state,
sticks_active,
)
# ─────────────────────────────────────────────────────────────────────────────
# Helpers
# ─────────────────────────────────────────────────────────────────────────────
def _step(
    mode="RC",
    ramp_elapsed=0.0,
    ramp_s=0.5,
    ch6_auto=False,
    sticks_now_active=False,
    neutral_elapsed=0.0,
    return_delay=2.0,
    slam_ok=True,
    rc_link_ok=True,
    override_active=False,
):
    """Call step_state() with sensible defaults so tests name only deviations."""
    return step_state(
        current_mode=mode,
        ramp_elapsed_s=ramp_elapsed,
        ramp_s=ramp_s,
        ch6_auto=ch6_auto,
        sticks_now_active=sticks_now_active,
        sticks_neutral_elapsed_s=neutral_elapsed,
        return_delay_s=return_delay,
        slam_ok=slam_ok,
        rc_link_ok=rc_link_ok,
        override_active=override_active,
    )
# ─────────────────────────────────────────────────────────────────────────────
# ch6_wants_auto
# ─────────────────────────────────────────────────────────────────────────────
class TestCh6WantsAuto:
    """ch6_wants_auto(): threshold strictness and index-bounds behavior."""
    def test_high_value_returns_true(self):
        assert ch6_wants_auto([0.0, 0.0, 0.0, 0.0, 0.0, 1.0], ch6_idx=5) is True
    def test_low_value_returns_false(self):
        assert ch6_wants_auto([0.0, 0.0, 0.0, 0.0, 0.0, -1.0], ch6_idx=5) is False
    def test_mid_value_below_threshold(self):
        assert ch6_wants_auto([0.0, 0.0, 0.0, 0.0, 0.0, 0.4], ch6_idx=5) is False
    def test_exactly_at_threshold(self):
        # 0.5 > 0.5 is False; require strictly above
        assert ch6_wants_auto([0.0, 0.0, 0.0, 0.0, 0.0, 0.5], ch6_idx=5) is False
    def test_just_above_threshold(self):
        assert ch6_wants_auto([0.0, 0.0, 0.0, 0.0, 0.0, 0.51], ch6_idx=5) is True
    def test_out_of_range_index(self):
        assert ch6_wants_auto([1.0], ch6_idx=10) is False
    def test_empty_axes(self):
        assert ch6_wants_auto([], ch6_idx=0) is False
    def test_custom_threshold(self):
        assert ch6_wants_auto([0.0, 0.3], ch6_idx=1, threshold=0.2) is True
        assert ch6_wants_auto([0.0, 0.3], ch6_idx=1, threshold=0.5) is False
# ─────────────────────────────────────────────────────────────────────────────
# sticks_active
# ─────────────────────────────────────────────────────────────────────────────
class TestSticksActive:
    """sticks_active(): magnitude threshold, sign, and index filtering."""
    def test_all_zero_not_active(self):
        assert sticks_active([0.0, 0.0, 0.0, 0.0], (0, 1, 2, 3)) is False
    def test_one_stick_above_threshold(self):
        assert sticks_active([0.0, 0.15, 0.0, 0.0], (0, 1, 2, 3)) is True
    def test_all_exactly_at_threshold_not_active(self):
        axes = [0.10, 0.10, 0.10, 0.10]
        assert sticks_active(axes, (0, 1, 2, 3), threshold=0.10) is False
    def test_just_above_threshold_active(self):
        assert sticks_active([0.0, 0.0, 0.0, 0.11], (0, 1, 2, 3)) is True
    def test_negative_deflection_detected(self):
        assert sticks_active([-0.5, 0.0, 0.0, 0.0], (0,)) is True
    def test_out_of_range_index_skipped(self):
        # index 5 is out of range for a 4-element list — should not crash
        assert sticks_active([0.0, 0.0, 0.0, 0.0], (0, 1, 5)) is False
    def test_only_monitored_axes_checked(self):
        # axes[4] = 0.9 but only (0,1,2,3) are monitored
        assert sticks_active([0.0, 0.0, 0.0, 0.0, 0.9], (0, 1, 2, 3)) is False
# ─────────────────────────────────────────────────────────────────────────────
# ramp_alpha / alpha_for_mode
# ─────────────────────────────────────────────────────────────────────────────
class TestRampAlpha:
    """ramp_alpha(): linearity, clamping, and zero-duration edge case."""
    def test_zero_elapsed_returns_zero(self):
        assert ramp_alpha(0.0, 0.5) == pytest.approx(0.0)
    def test_full_elapsed_returns_one(self):
        assert ramp_alpha(0.5, 0.5) == pytest.approx(1.0)
    def test_beyond_duration_clamped_to_one(self):
        assert ramp_alpha(1.0, 0.5) == pytest.approx(1.0)
    def test_halfway(self):
        assert ramp_alpha(0.25, 0.5) == pytest.approx(0.5)
    def test_zero_duration_returns_one(self):
        assert ramp_alpha(0.0, 0.0) == pytest.approx(1.0)
class TestAlphaForMode:
    """alpha_for_mode(mode, ramp_elapsed, ramp_s) — autonomy blend weight:
    0.0 in RC, 1.0 in AUTO, rising ramp in RAMP_TO_AUTO, falling (1 - ramp)
    in RAMP_TO_RC; unknown modes fail safe to 0.0 (full RC authority)."""

    def test_rc_returns_zero(self):
        assert alpha_for_mode("RC", 0.3, 0.5) == 0.0

    def test_auto_returns_one(self):
        assert alpha_for_mode("AUTO", 0.3, 0.5) == 1.0

    def test_ramp_to_auto_mid(self):
        assert alpha_for_mode("RAMP_TO_AUTO", 0.25, 0.5) == pytest.approx(0.5)

    def test_ramp_to_rc_mid(self):
        assert alpha_for_mode("RAMP_TO_RC", 0.25, 0.5) == pytest.approx(0.5)

    def test_ramp_to_auto_start(self):
        assert alpha_for_mode("RAMP_TO_AUTO", 0.0, 0.5) == pytest.approx(0.0)

    def test_ramp_to_rc_start(self):
        assert alpha_for_mode("RAMP_TO_RC", 0.0, 0.5) == pytest.approx(1.0)

    def test_ramp_to_auto_complete(self):
        assert alpha_for_mode("RAMP_TO_AUTO", 0.5, 0.5) == pytest.approx(1.0)

    def test_ramp_to_rc_complete(self):
        assert alpha_for_mode("RAMP_TO_RC", 0.5, 0.5) == pytest.approx(0.0)

    def test_unknown_mode_returns_zero(self):
        # Fail-safe: an unrecognized mode string yields full RC authority.
        assert alpha_for_mode("UNKNOWN", 0.3, 0.5) == 0.0
# ─────────────────────────────────────────────────────────────────────────────
# led_pattern
# ─────────────────────────────────────────────────────────────────────────────
class TestLedPattern:
    """led_pattern(mode, slam_ok, rc_link_ok) — status LED selection.
    Priority: lost RC link beats everything (fast red blink); then
    yellow for manual-ish modes, green for AUTO (orange blink if SLAM lost),
    slow green blink while ramping to AUTO."""

    def test_no_rc_link_overrides_all(self):
        for mode in ("RC", "RAMP_TO_AUTO", "AUTO", "RAMP_TO_RC"):
            assert led_pattern(mode, slam_ok=True, rc_link_ok=False) == "blink_red_fast"

    def test_rc_mode_yellow(self):
        assert led_pattern("RC", slam_ok=True, rc_link_ok=True) == "solid_yellow"

    def test_ramp_to_rc_yellow(self):
        assert led_pattern("RAMP_TO_RC", slam_ok=True, rc_link_ok=True) == "solid_yellow"

    def test_auto_ok_green(self):
        assert led_pattern("AUTO", slam_ok=True, rc_link_ok=True) == "solid_green"

    def test_auto_no_slam_orange(self):
        assert led_pattern("AUTO", slam_ok=False, rc_link_ok=True) == "blink_orange_fast"

    def test_ramp_to_auto_blink_green(self):
        assert led_pattern("RAMP_TO_AUTO", slam_ok=True, rc_link_ok=True) == "blink_green_slow"

    def test_rc_slam_state_irrelevant(self):
        p1 = led_pattern("RC", slam_ok=True, rc_link_ok=True)
        p2 = led_pattern("RC", slam_ok=False, rc_link_ok=True)
        assert p1 == p2  # SLAM doesn't affect RC LED
# ─────────────────────────────────────────────────────────────────────────────
# step_state — RC transitions
# ─────────────────────────────────────────────────────────────────────────────
class TestStepStateFromRC:
    """Transitions out of RC. Entering RAMP_TO_AUTO requires: CH6 flipped to
    AUTO, SLAM ok, RC link ok, and any stick-override latch cleared (sticks
    neutral for >= return_delay). `_step` is the keyword-arg harness around
    step_state() defined earlier in this file."""

    def test_rc_stays_rc_by_default(self):
        mode, override, instant = _step(mode="RC")
        assert mode == "RC"
        assert not instant

    def test_rc_to_ramp_when_ch6_auto(self):
        mode, override, instant = _step(mode="RC", ch6_auto=True)
        assert mode == "RAMP_TO_AUTO"
        assert not instant

    def test_rc_stays_rc_when_ch6_auto_but_no_slam(self):
        mode, _, _ = _step(mode="RC", ch6_auto=True, slam_ok=False)
        assert mode == "RC"

    def test_rc_stays_rc_when_ch6_auto_but_no_rc_link(self):
        mode, _, instant = _step(mode="RC", ch6_auto=True, rc_link_ok=False)
        assert mode == "RC"
        assert instant  # RC link lost → instant

    def test_rc_stays_rc_when_override_active_and_sticks_not_neutral(self):
        # Override latch still set: 1.0 s of neutral sticks < 2.0 s delay.
        mode, _, _ = _step(
            mode="RC", ch6_auto=True, slam_ok=True,
            override_active=True, neutral_elapsed=1.0, return_delay=2.0)
        assert mode == "RC"

    def test_rc_to_ramp_after_override_clears(self):
        # override clears after sticks neutral >= return_delay
        mode, override, _ = _step(
            mode="RC", ch6_auto=True, slam_ok=True,
            override_active=True, neutral_elapsed=2.5, return_delay=2.0)
        assert mode == "RAMP_TO_AUTO"
        assert not override
# ─────────────────────────────────────────────────────────────────────────────
# step_state — RAMP_TO_AUTO transitions
# ─────────────────────────────────────────────────────────────────────────────
class TestStepStateFromRampToAuto:
    """Transitions out of RAMP_TO_AUTO: completes to AUTO when the ramp
    elapses; stick motion latches an instant override back to RC; losing
    CH6-AUTO or SLAM aborts gracefully via RAMP_TO_RC; losing the RC link
    drops to RC instantly."""

    def test_continues_ramping(self):
        mode, _, _ = _step(mode="RAMP_TO_AUTO", ramp_elapsed=0.2, ramp_s=0.5,
                           ch6_auto=True)
        assert mode == "RAMP_TO_AUTO"

    def test_completes_to_auto(self):
        mode, _, _ = _step(mode="RAMP_TO_AUTO", ramp_elapsed=0.5, ramp_s=0.5,
                           ch6_auto=True)
        assert mode == "AUTO"

    def test_stick_override_instant_rc(self):
        mode, override, instant = _step(
            mode="RAMP_TO_AUTO", ch6_auto=True,
            sticks_now_active=True)
        assert mode == "RC"
        assert override is True
        assert instant is True

    def test_ch6_low_aborts_to_ramp_rc(self):
        mode, _, _ = _step(mode="RAMP_TO_AUTO", ch6_auto=False, ramp_elapsed=0.1)
        assert mode == "RAMP_TO_RC"

    def test_slam_lost_aborts_to_ramp_rc(self):
        mode, _, _ = _step(mode="RAMP_TO_AUTO", ch6_auto=True, slam_ok=False)
        assert mode == "RAMP_TO_RC"

    def test_rc_link_lost_instant_rc(self):
        mode, _, instant = _step(mode="RAMP_TO_AUTO", ch6_auto=True, rc_link_ok=False)
        assert mode == "RC"
        assert instant is True
# ─────────────────────────────────────────────────────────────────────────────
# step_state — AUTO transitions
# ─────────────────────────────────────────────────────────────────────────────
class TestStepStateFromAuto:
    """Transitions out of AUTO: stick motion or a lost RC link drop to RC
    instantly; CH6 low or a lost SLAM fix exit gracefully via RAMP_TO_RC."""

    def test_auto_stays_auto(self):
        mode, _, _ = _step(mode="AUTO", ch6_auto=True)
        assert mode == "AUTO"

    def test_stick_override_instant_rc(self):
        mode, override, instant = _step(
            mode="AUTO", ch6_auto=True, sticks_now_active=True)
        assert mode == "RC"
        assert override is True
        assert instant is True

    def test_ch6_low_ramp_to_rc(self):
        mode, _, instant = _step(mode="AUTO", ch6_auto=False)
        assert mode == "RAMP_TO_RC"
        assert not instant

    def test_slam_lost_ramp_to_rc(self):
        mode, _, instant = _step(mode="AUTO", ch6_auto=True, slam_ok=False)
        assert mode == "RAMP_TO_RC"
        assert not instant

    def test_rc_link_lost_instant_rc(self):
        mode, _, instant = _step(mode="AUTO", ch6_auto=True, rc_link_ok=False)
        assert mode == "RC"
        assert instant is True
# ─────────────────────────────────────────────────────────────────────────────
# step_state — RAMP_TO_RC transitions
# ─────────────────────────────────────────────────────────────────────────────
class TestStepStateFromRampToRC:
    """Transitions out of RAMP_TO_RC: completes to RC when the ramp elapses;
    sticks or a lost link force instant RC; if CH6 still commands AUTO and
    no override latch is set, the ramp is cancelled back to RAMP_TO_AUTO."""

    def test_continues_ramping(self):
        mode, _, _ = _step(mode="RAMP_TO_RC", ramp_elapsed=0.2, ramp_s=0.5)
        assert mode == "RAMP_TO_RC"

    def test_completes_to_rc(self):
        mode, _, _ = _step(mode="RAMP_TO_RC", ramp_elapsed=0.5, ramp_s=0.5)
        assert mode == "RC"

    def test_stick_override_instant_rc(self):
        mode, override, instant = _step(
            mode="RAMP_TO_RC", sticks_now_active=True, ch6_auto=True)
        assert mode == "RC"
        assert instant is True

    def test_ch6_still_auto_no_override_cancels_back(self):
        # CH6 still says AUTO, no override → cancel to RAMP_TO_AUTO
        mode, _, _ = _step(mode="RAMP_TO_RC", ch6_auto=True, slam_ok=True,
                           override_active=False, ramp_elapsed=0.1)
        assert mode == "RAMP_TO_AUTO"

    def test_rc_link_lost_instant_rc(self):
        _, _, instant = _step(mode="RAMP_TO_RC", rc_link_ok=False)
        assert instant is True
# ─────────────────────────────────────────────────────────────────────────────
# blend_twist
# ─────────────────────────────────────────────────────────────────────────────
class TestBlendTwist:
    """blend_twist(linear_x, angular_z, alpha) — scales the autonomy twist
    by alpha clamped to [0, 1]: alpha=0 zeros the output (pure RC),
    alpha=1 passes the command through unchanged; signs are preserved."""

    def test_alpha_zero_zeros_output(self):
        lx, az = blend_twist(1.0, 0.5, 0.0)
        assert lx == pytest.approx(0.0)
        assert az == pytest.approx(0.0)

    def test_alpha_one_passes_through(self):
        lx, az = blend_twist(0.4, -0.7, 1.0)
        assert lx == pytest.approx(0.4)
        assert az == pytest.approx(-0.7)

    def test_alpha_half_scales(self):
        lx, az = blend_twist(1.0, 1.0, 0.5)
        assert lx == pytest.approx(0.5)
        assert az == pytest.approx(0.5)

    def test_alpha_above_one_clamped(self):
        lx, az = blend_twist(1.0, 1.0, 1.5)
        assert lx == pytest.approx(1.0)
        assert az == pytest.approx(1.0)

    def test_alpha_below_zero_clamped(self):
        lx, az = blend_twist(1.0, 1.0, -0.5)
        assert lx == pytest.approx(0.0)
        assert az == pytest.approx(0.0)

    def test_negative_cmd_vel_preserved(self):
        lx, az = blend_twist(-0.5, -1.0, 0.5)
        assert lx == pytest.approx(-0.25)
        assert az == pytest.approx(-0.5)
# ─────────────────────────────────────────────────────────────────────────────
# Integration scenarios
# ─────────────────────────────────────────────────────────────────────────────
class TestIntegrationScenarios:
    """End-to-end walks through the mode state machine, driving step_state()
    directly with positional arguments (the argument order matches the
    step_state signature defined earlier in this file) and checking the
    blend alpha produced by alpha_for_mode() along the way."""

    def test_full_rc_to_auto_ramp_cycle(self):
        """Simulate RC→RAMP_TO_AUTO→AUTO with 0.5s ramp at 20Hz."""
        mode = "RC"
        override = False
        ramp_s = 0.5
        dt = 1.0 / 20.0
        t_ramp = 0.0
        # CH6 flipped to AUTO, SLAM ok
        mode, override, instant = step_state(
            mode, 0.0, ramp_s, True, False, 5.0, 2.0, True, True, override)
        assert mode == "RAMP_TO_AUTO"
        t_ramp = 0.0
        # Simulate 10 ticks (0.5s) → should complete
        for tick in range(10):
            t_ramp += dt
            mode, override, instant = step_state(
                mode, t_ramp, ramp_s, True, False, 5.0, 2.0, True, True, override)
        assert mode == "AUTO"
        assert alpha_for_mode("AUTO", t_ramp, ramp_s) == 1.0

    def test_stick_override_then_return_to_auto(self):
        """Stick override while AUTO → RC → neutral 2s → back to AUTO."""
        # Start in AUTO
        mode, override, instant = _step(
            mode="AUTO", ch6_auto=True, sticks_now_active=True)
        assert mode == "RC"
        assert override is True
        assert instant is True
        # Sticks neutral but < 2s — stays RC
        mode, override, _ = _step(
            mode="RC", ch6_auto=True, slam_ok=True,
            override_active=True, neutral_elapsed=1.0, return_delay=2.0)
        assert mode == "RC"
        # Sticks neutral >= 2s — override clears, enters ramp
        mode, override, _ = _step(
            mode="RC", ch6_auto=True, slam_ok=True,
            override_active=True, neutral_elapsed=2.0, return_delay=2.0)
        assert mode == "RAMP_TO_AUTO"
        assert override is False

    def test_slam_lost_during_auto_graceful_exit(self):
        """SLAM fix lost during AUTO → graceful RAMP_TO_RC (not instant)."""
        mode, _, instant = _step(mode="AUTO", ch6_auto=True, slam_ok=False)
        assert mode == "RAMP_TO_RC"
        assert not instant

    def test_rc_link_lost_always_instant(self):
        """RC link lost from any state → instant RC."""
        for start_mode in ("AUTO", "RAMP_TO_AUTO", "RAMP_TO_RC"):
            mode, _, instant = _step(mode=start_mode, ch6_auto=True, rc_link_ok=False)
            assert mode == "RC", f"from {start_mode}"
            assert instant is True, f"from {start_mode}"

    def test_blend_alpha_sequence_through_ramp(self):
        """Blend alpha is monotonically increasing during RAMP_TO_AUTO."""
        ramp_s = 0.5
        alphas = [
            alpha_for_mode("RAMP_TO_AUTO", t * 0.05, ramp_s)
            for t in range(11)  # 0.0 to 0.5 s in 0.05 s steps
        ]
        for a, b in zip(alphas, alphas[1:]):
            assert b >= a

    def test_ramp_to_rc_alpha_decreasing(self):
        """Blend alpha is monotonically decreasing during RAMP_TO_RC."""
        ramp_s = 0.5
        alphas = [
            alpha_for_mode("RAMP_TO_RC", t * 0.05, ramp_s)
            for t in range(11)
        ]
        for a, b in zip(alphas, alphas[1:]):
            assert b <= a

    def test_no_auto_without_slam(self):
        """From RC, ch6=AUTO but slam=False → cannot enter AUTO path."""
        mode, _, _ = _step(mode="RC", ch6_auto=True, slam_ok=False)
        assert mode == "RC"

    def test_no_auto_without_rc_link(self):
        """From RC, ch6=AUTO but rc_link=False → stays RC (instant)."""
        mode, _, instant = _step(mode="RC", ch6_auto=True, rc_link_ok=False)
        assert mode == "RC"
        assert instant is True

View File

@ -1,89 +0,0 @@
/*
 * battery.c — Vbat ADC reading for CRSF telemetry uplink (Issue #103)
 *
 * Hardware: ADC3 channel IN11 on PC1 (ADC_BATT 1, Mamba F722S FC).
 * Voltage divider: 10 (upper) / 1 (lower) → VBAT_SCALE_NUM = 11.
 *
 * Vbat_mV = (raw × VBAT_AREF_MV × VBAT_SCALE_NUM) >> VBAT_ADC_BITS
 *         = (raw × 3300 × 11) / 4096
 */
#include "battery.h"
#include "config.h"
#include "stm32f7xx_hal.h"
static ADC_HandleTypeDef s_hadc;
static bool s_ready = false;
/*
 * Initialise ADC3 (channel IN11 / PC1) for blocking single-shot Vbat reads.
 * On any HAL error the function returns early, leaving s_ready false so
 * battery_read_mv() reports 0 instead of reading a half-configured ADC.
 */
void battery_init(void) {
    /* Enable peripheral clocks before touching ADC3 / GPIOC registers. */
    __HAL_RCC_ADC3_CLK_ENABLE();
    __HAL_RCC_GPIOC_CLK_ENABLE();

    /* PC1 → analog input (no pull, no speed) */
    GPIO_InitTypeDef gpio = {0};
    gpio.Pin = GPIO_PIN_1;
    gpio.Mode = GPIO_MODE_ANALOG;
    gpio.Pull = GPIO_NOPULL;
    HAL_GPIO_Init(GPIOC, &gpio);

    /* ADC3 — single-conversion, software trigger, 12-bit right-aligned */
    s_hadc.Instance = ADC3;
    s_hadc.Init.ClockPrescaler = ADC_CLOCK_SYNC_PCLK_DIV8; /* APB2/8 */
    s_hadc.Init.Resolution = ADC_RESOLUTION_12B;
    s_hadc.Init.ScanConvMode = DISABLE;        /* single channel only */
    s_hadc.Init.ContinuousConvMode = DISABLE;  /* one shot per read call */
    s_hadc.Init.DiscontinuousConvMode = DISABLE;
    s_hadc.Init.ExternalTrigConvEdge = ADC_EXTERNALTRIGCONVEDGE_NONE;
    s_hadc.Init.ExternalTrigConv = ADC_SOFTWARE_START;
    s_hadc.Init.DataAlign = ADC_DATAALIGN_RIGHT;
    s_hadc.Init.NbrOfConversion = 1;
    s_hadc.Init.DMAContinuousRequests = DISABLE;
    s_hadc.Init.EOCSelection = ADC_EOC_SINGLE_CONV;
    if (HAL_ADC_Init(&s_hadc) != HAL_OK) return;

    /* Channel IN11 (PC1) with 480-cycle sampling for stability */
    ADC_ChannelConfTypeDef ch = {0};
    ch.Channel = ADC_CHANNEL_11;
    ch.Rank = 1;
    ch.SamplingTime = ADC_SAMPLETIME_480CYCLES;
    if (HAL_ADC_ConfigChannel(&s_hadc, &ch) != HAL_OK) return;

    s_ready = true;
}
/*
 * battery_read_mv() — blocking single-shot Vbat read in millivolts.
 *
 * Starts one software-triggered conversion on ADC3/IN11 and polls for
 * completion with a 2 ms timeout. Returns 0 if battery_init() failed
 * or the conversion timed out.
 *
 * Scaling: Vbat_mV = raw * VBAT_AREF_MV * VBAT_SCALE_NUM / 2^VBAT_ADC_BITS.
 * Worst case raw = 4095: 4095 * 3300 * 11 ≈ 1.49e8, which fits in uint32_t.
 */
uint32_t battery_read_mv(void) {
    if (!s_ready) return 0u;
    HAL_ADC_Start(&s_hadc);
    if (HAL_ADC_PollForConversion(&s_hadc, 2u) != HAL_OK) {
        /* Bug fix: previously returned without stopping, leaving the ADC
         * started after a timeout. Stop it so the next read starts clean. */
        HAL_ADC_Stop(&s_hadc);
        return 0u;
    }
    uint32_t raw = HAL_ADC_GetValue(&s_hadc);
    HAL_ADC_Stop(&s_hadc);
    /* Vbat_mV = raw × (VREF_mV × scale) / ADC_counts */
    return (raw * (uint32_t)VBAT_AREF_MV * VBAT_SCALE_NUM) /
           ((1u << VBAT_ADC_BITS));
}
/*
 * battery_estimate_pct() — coarse state-of-charge from Vbat in mV.
 *
 * Cell-count detection is by voltage alone: >= 13 000 mV is treated as 4S
 * (13.2–16.8 V window), anything lower as 3S (9.9–12.6 V window). Inside
 * the window the estimate is a straight line; outside it clamps to 0/100.
 * NOTE(review): battery.h claims 255 for out-of-range, but this
 * implementation (and its Python mirror tests) clamp instead — the header
 * comment should be corrected.
 */
uint8_t battery_estimate_pct(uint32_t voltage_mv) {
    const bool is_4s = (voltage_mv >= 13000u);
    const uint32_t floor_mv = is_4s ? 13200u : 9900u;
    const uint32_t full_mv  = is_4s ? 16800u : 12600u;

    if (voltage_mv <= floor_mv) return 0u;
    if (voltage_mv >= full_mv)  return 100u;
    /* Linear interpolation across the pack's usable window. */
    return (uint8_t)(((voltage_mv - floor_mv) * 100u) / (full_mv - floor_mv));
}

View File

@ -291,64 +291,3 @@ int16_t crsf_to_range(uint16_t val, int16_t min, int16_t max) {
if (r > max) r = max;
return (int16_t)r;
}
/* ------------------------------------------------------------------ */
/* Telemetry TX helpers */
/* ------------------------------------------------------------------ */
/*
 * Assemble a complete CRSF frame into `buf` and return its total length.
 *
 * Layout: [SYNC][LEN][TYPE][payload…][CRC], where LEN counts
 * TYPE + payload + CRC and the CRC is produced by crsf_frame_crc().
 *
 * buf        : destination, at least CRSF_MAX_FRAME_LEN bytes
 * frame_type : CRSF type byte (e.g. 0x08 battery, 0x21 flight mode)
 * payload    : payload bytes (excluding type and CRC)
 * plen       : payload length in bytes
 * Returns the total frame length, or 0 if it would exceed
 * CRSF_MAX_FRAME_LEN.
 */
static uint8_t crsf_build_frame(uint8_t *buf, uint8_t frame_type,
                                const uint8_t *payload, uint8_t plen) {
    const uint8_t total = (uint8_t)(plen + 4u); /* SYNC + LEN + TYPE + payload + CRC */
    if (total > CRSF_MAX_FRAME_LEN) return 0;

    buf[0] = CRSF_SYNC;               /* 0xC8 */
    buf[1] = (uint8_t)(plen + 2u);    /* LEN = TYPE + payload + CRC */
    buf[2] = frame_type;
    memcpy(&buf[3], payload, plen);
    buf[total - 1u] = crsf_frame_crc(buf, total);
    return total;
}
/*
 * crsf_send_battery() — transmit a CRSF battery sensor frame (type 0x08).
 *
 * voltage_mv    : battery voltage in mV, encoded as big-endian uint16 in
 *                 100 mV units
 * current_ma    : current in mA, encoded as big-endian uint16 in 100 mA units
 * remaining_pct : state of charge 0–100 %
 * The 24-bit capacity field is always 0 (no coulomb counter on this board).
 */
void crsf_send_battery(uint32_t voltage_mv, uint32_t current_ma,
                       uint8_t remaining_pct) {
    const uint16_t volt_100mv = (uint16_t)(voltage_mv / 100u);
    const uint16_t curr_100ma = (uint16_t)(current_ma / 100u);

    /* Payload: [v_hi][v_lo][c_hi][c_lo][cap_hi][cap_mid][cap_lo][remaining] */
    uint8_t payload[8];
    payload[0] = (uint8_t)(volt_100mv >> 8);
    payload[1] = (uint8_t)(volt_100mv & 0xFF);
    payload[2] = (uint8_t)(curr_100ma >> 8);
    payload[3] = (uint8_t)(curr_100ma & 0xFF);
    payload[4] = 0u; /* capacity mAh — not tracked */
    payload[5] = 0u;
    payload[6] = 0u;
    payload[7] = remaining_pct;

    uint8_t frame[CRSF_MAX_FRAME_LEN];
    const uint8_t n = crsf_build_frame(frame, 0x08u, payload, sizeof(payload));
    if (n != 0u) HAL_UART_Transmit(&s_uart, frame, n, 5u);
}
/*
 * crsf_send_flight_mode() — transmit a CRSF flight-mode text frame (0x21).
 *
 * The null-terminated string shows up on the handset's OSD/status bar:
 * "ARMED" when armed, "DISARM" otherwise.
 */
void crsf_send_flight_mode(bool armed) {
    const char *mode_text = armed ? "ARMED" : "DISARM";
    const uint8_t plen = (uint8_t)(strlen(mode_text) + 1u); /* text + NUL */

    uint8_t frame[CRSF_MAX_FRAME_LEN];
    const uint8_t n = crsf_build_frame(frame, 0x21u,
                                       (const uint8_t *)mode_text, plen);
    if (n != 0u) HAL_UART_Transmit(&s_uart, frame, n, 5u);
}

View File

@ -16,7 +16,6 @@
#include "bmp280.h"
#include "mag.h"
#include "jetson_cmd.h"
#include "battery.h"
#include <math.h>
#include <string.h>
#include <stdio.h>
@ -143,9 +142,6 @@ int main(void) {
mode_manager_t mode;
mode_manager_init(&mode);
/* Init battery ADC (PC1/ADC3 — Vbat divider 11:1) for CRSF telemetry */
battery_init();
/* Probe I2C1 for optional sensors — skip gracefully if not found */
int baro_chip = 0; /* chip_id: 0x58=BMP280, 0x60=BME280, 0=absent */
mag_type_t mag_type = MAG_NONE;
@ -171,7 +167,6 @@ int main(void) {
uint32_t send_tick = 0;
uint32_t balance_tick = 0;
uint32_t esc_tick = 0;
uint32_t crsf_telem_tick = 0; /* CRSF uplink telemetry TX timer */
const float dt = 1.0f / PID_LOOP_HZ; /* 1ms at 1kHz */
while (1) {
@ -318,17 +313,6 @@ int main(void) {
}
}
/* CRSF telemetry uplink — battery voltage + arm state at 1 Hz.
* Sends battery sensor frame (0x08) and flight mode frame (0x21)
* back to ELRS TX module so the pilot's handset OSD shows Vbat + state. */
if (now - crsf_telem_tick >= (1000u / CRSF_TELEMETRY_HZ)) {
crsf_telem_tick = now;
uint32_t vbat_mv = battery_read_mv();
uint8_t soc_pct = battery_estimate_pct(vbat_mv);
crsf_send_battery(vbat_mv, 0u, soc_pct);
crsf_send_flight_mode(bal.state == BALANCE_ARMED);
}
/* USB telemetry at 50Hz (only when streaming enabled and calibration done) */
if (cdc_streaming && mpu6000_is_calibrated() && now - send_tick >= 20) {
send_tick = now;

View File

@ -1,237 +0,0 @@
"""
test_crsf_frames.py Unit tests for CRSF telemetry frame building (Issue #103).
Mirrors the C logic in src/crsf.c for crsf_send_battery() and
crsf_send_flight_mode() so frame layout and CRC can be verified
without flashing hardware.
Run with: pytest test/test_crsf_frames.py -v
"""
import struct
import pytest
# ── CRC8 DVB-S2 (polynomial 0xD5) ─────────────────────────────────────────
def crc8_dvb_s2(crc: int, byte: int) -> int:
    """Fold one byte into a CRC8 accumulator (DVB-S2 polynomial 0xD5).

    Standard MSB-first bitwise update: XOR the byte in, then for each of
    the eight bits shift left and conditionally XOR with the polynomial.
    """
    acc = crc ^ byte
    for _ in range(8):
        if acc & 0x80:
            acc = ((acc << 1) ^ 0xD5) & 0xFF
        else:
            acc = (acc << 1) & 0xFF
    return acc
def crsf_frame_crc(frame: bytes) -> int:
    """Compute the CRSF frame CRC.

    Coverage is frame[2] (the TYPE byte) through frame[-2] (the last
    payload byte) — SYNC, LEN and the CRC slot itself are excluded.
    """
    acc = 0
    for value in frame[2:-1]:
        acc = crc8_dvb_s2(acc, value)
    return acc
# ── Frame builders matching C implementation ───────────────────────────────
CRSF_SYNC = 0xC8
def build_battery_frame(voltage_mv: int, current_ma: int,
                        remaining_pct: int) -> bytes:
    """Build a CRSF battery sensor frame (type 0x08).

    Payload layout (8 bytes):
      voltage   : 100 mV units, uint16 big-endian
      current   : 100 mA units, uint16 big-endian
      capacity  : 0 mAh (not tracked), uint24 big-endian
      remaining : 0–100 %, uint8
    """
    payload = struct.pack(
        '>HH3sB',
        voltage_mv // 100,
        current_ma // 100,
        b'\x00\x00\x00',
        remaining_pct,
    )
    # SYNC + LEN(TYPE+payload+CRC) + TYPE, then payload, then CRC.
    body = bytes([CRSF_SYNC, len(payload) + 2, 0x08]) + payload
    crc = crsf_frame_crc(body + b'\x00')  # CRC computed over TYPE..payload
    return body + bytes([crc])
def build_flight_mode_frame(armed: bool) -> bytes:
    """Build a CRSF flight-mode text frame (type 0x21), NUL-terminated."""
    text = b'ARMED\x00' if armed else b'DISARM\x00'
    body = bytes([CRSF_SYNC, len(text) + 2, 0x21]) + text
    crc = crsf_frame_crc(body + b'\x00')  # CRC computed over TYPE..payload
    return body + bytes([crc])
# ── Helpers ────────────────────────────────────────────────────────────────
def validate_frame(frame: bytes):
    """Assert basic CRSF frame invariants: sync byte, consistent length
    field, 64-byte maximum, and a correct trailing CRC."""
    assert frame[0] == CRSF_SYNC, "bad sync byte"
    expected_total = frame[1] + 2  # SYNC + LEN + (TYPE + payload + CRC)
    assert len(frame) == expected_total, \
        f"frame length mismatch: {len(frame)} != {expected_total}"
    assert len(frame) <= 64, "frame too long (CRSF max 64 bytes)"
    want = crsf_frame_crc(frame)
    assert frame[-1] == want, \
        f"CRC mismatch: got {frame[-1]:#04x}, expected {want:#04x}"
# ── Battery frame tests ────────────────────────────────────────────────────
class TestBatteryFrame:
    """Byte-level checks of the type-0x08 battery frame: sync/type bytes,
    big-endian 100 mV / 100 mA encodings, zeroed capacity, CRC, and
    total length SYNC(1)+LEN(1)+TYPE(1)+payload(8)+CRC(1) = 12."""

    def test_sync_byte(self):
        f = build_battery_frame(12600, 0, 100)
        assert f[0] == 0xC8

    def test_frame_type(self):
        f = build_battery_frame(12600, 0, 100)
        assert f[2] == 0x08

    def test_frame_invariants(self):
        validate_frame(build_battery_frame(12600, 5000, 80))

    def test_voltage_encoding_3s_full(self):
        """12.6 V → 126 in 100 mV units, big-endian."""
        f = build_battery_frame(12600, 0, 100)
        v100 = (f[3] << 8) | f[4]
        assert v100 == 126

    def test_voltage_encoding_4s_full(self):
        """16.8 V → 168."""
        f = build_battery_frame(16800, 0, 100)
        v100 = (f[3] << 8) | f[4]
        assert v100 == 168

    def test_current_encoding(self):
        """5000 mA → 50 in 100 mA units."""
        f = build_battery_frame(12000, 5000, 75)
        c100 = (f[5] << 8) | f[6]
        assert c100 == 50

    def test_remaining_pct(self):
        # remaining % is the last payload byte (offset 10).
        f = build_battery_frame(11000, 0, 42)
        assert f[10] == 42

    def test_capacity_zero(self):
        """Capacity bytes (cap_hi, cap_mid, cap_lo) are zero — no coulomb counter."""
        f = build_battery_frame(12600, 0, 100)
        assert f[7] == 0 and f[8] == 0 and f[9] == 0

    def test_crc_correct(self):
        f = build_battery_frame(11500, 2000, 65)
        validate_frame(f)

    def test_zero_voltage(self):
        """Disconnected battery → 0 mV, 0 %."""
        f = build_battery_frame(0, 0, 0)
        validate_frame(f)
        v100 = (f[3] << 8) | f[4]
        assert v100 == 0
        assert f[10] == 0

    def test_total_frame_length(self):
        """Battery frame: SYNC(1)+LEN(1)+TYPE(1)+payload(8)+CRC(1) = 12 bytes."""
        f = build_battery_frame(12000, 0, 80)
        assert len(f) == 12
# ── Flight mode frame tests ────────────────────────────────────────────────
class TestFlightModeFrame:
    """Checks of the type-0x21 flight-mode text frame: NUL-terminated
    'ARMED'/'DISARM' payload, type byte, CRC, and total lengths."""

    def test_armed_text(self):
        f = build_flight_mode_frame(True)
        payload = f[3:-1]
        assert payload == b'ARMED\x00'

    def test_disarmed_text(self):
        f = build_flight_mode_frame(False)
        payload = f[3:-1]
        assert payload == b'DISARM\x00'

    def test_frame_type(self):
        assert build_flight_mode_frame(True)[2] == 0x21
        assert build_flight_mode_frame(False)[2] == 0x21

    def test_crc_armed(self):
        validate_frame(build_flight_mode_frame(True))

    def test_crc_disarmed(self):
        validate_frame(build_flight_mode_frame(False))

    def test_armed_frame_length(self):
        """ARMED\0 = 6 bytes payload → total 10 bytes."""
        f = build_flight_mode_frame(True)
        assert len(f) == 10

    def test_disarmed_frame_length(self):
        """DISARM\0 = 7 bytes payload → total 11 bytes."""
        f = build_flight_mode_frame(False)
        assert len(f) == 11
# ── CRC8 DVB-S2 self-test ─────────────────────────────────────────────────
class TestCRC8:
    """Self-tests of the CRC8 DVB-S2 primitive and its use in frames."""

    def test_known_vector(self):
        """Verify CRC8 DVB-S2 against known value (poly 0xD5, init 0)."""
        # CRC of single byte 0xAB with poly 0xD5, init 0 → 0xC8
        crc = crc8_dvb_s2(0, 0xAB)
        assert crc == 0xC8

    def test_different_payloads_differ(self):
        f1 = build_battery_frame(12600, 0, 100)
        f2 = build_battery_frame(11000, 0, 50)
        assert f1[-1] != f2[-1], "different payloads should have different CRCs"

    def test_crc_covers_type(self):
        """Changing the frame type changes the CRC."""
        fa = build_battery_frame(12600, 0, 100)  # type 0x08
        fb = build_flight_mode_frame(True)       # type 0x21
        # Both frames differ in type byte and thus CRC
        assert fa[-1] != fb[-1]
# ── battery_estimate_pct logic mirrored in Python ─────────────────────────
def battery_estimate_pct(voltage_mv: int) -> int:
    """Python mirror of battery_estimate_pct() in battery.c.

    Voltage at or above 13 000 mV selects the 4S window (13.2–16.8 V),
    anything lower the 3S window (9.9–12.6 V). The result is a linear
    interpolation inside the window, clamped to 0/100 outside it.
    """
    is_4s = voltage_mv >= 13000
    floor_mv, full_mv = (13200, 16800) if is_4s else (9900, 12600)
    if voltage_mv <= floor_mv:
        return 0
    if voltage_mv >= full_mv:
        return 100
    return (voltage_mv - floor_mv) * 100 // (full_mv - floor_mv)
class TestBatteryEstimatePct:
    """Boundary checks for the 3S/4S SoC curve mirrored from battery.c."""

    def test_3s_full(self):
        assert battery_estimate_pct(12600) == 100

    def test_3s_empty(self):
        assert battery_estimate_pct(9900) == 0

    def test_3s_mid(self):
        # Midpoint of the 3S window should land near 50 %.
        pct = battery_estimate_pct(11250)
        assert 45 <= pct <= 55

    def test_4s_full(self):
        assert battery_estimate_pct(16800) == 100

    def test_4s_empty(self):
        assert battery_estimate_pct(13200) == 0

    def test_3s_over_voltage(self):
        """13000 mV triggers 4S branch (v_min=13200) → classified as dead 4S → 0%."""
        assert battery_estimate_pct(13000) == 0

    def test_zero_voltage(self):
        assert battery_estimate_pct(0) == 0

View File

@ -1,2 +0,0 @@
node_modules/
dist/

View File

@ -1,12 +0,0 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<title>Saltybot Social Dashboard</title>
</head>
<body>
<div id="root"></div>
<script type="module" src="/src/main.jsx"></script>
</body>
</html>

File diff suppressed because it is too large Load Diff

View File

@ -1,24 +0,0 @@
{
"name": "saltybot-social-dashboard",
"version": "1.0.0",
"private": true,
"type": "module",
"description": "Social-bot monitoring and configuration dashboard",
"scripts": {
"dev": "vite --port 8080 --host",
"build": "vite build",
"preview": "vite preview --port 8080 --host"
},
"dependencies": {
"react": "^18.3.1",
"react-dom": "^18.3.1",
"roslib": "^1.4.1"
},
"devDependencies": {
"@vitejs/plugin-react": "^4.3.1",
"autoprefixer": "^10.4.20",
"postcss": "^8.4.47",
"tailwindcss": "^3.4.14",
"vite": "^5.4.10"
}
}

View File

@ -1,6 +0,0 @@
export default {
plugins: {
tailwindcss: {},
autoprefixer: {},
},
};

View File

@ -1,165 +0,0 @@
/**
* App.jsx Saltybot Social Dashboard root component.
*
* Layout:
* [TopBar: connection config + pipeline state badge]
* [TabNav: Status | Faces | Conversation | Personality | Navigation]
* [TabContent]
*/
import { useState, useCallback } from 'react';
import { useRosbridge } from './hooks/useRosbridge.js';
import { StatusPanel } from './components/StatusPanel.jsx';
import { FaceGallery } from './components/FaceGallery.jsx';
import { ConversationLog } from './components/ConversationLog.jsx';
import { PersonalityTuner } from './components/PersonalityTuner.jsx';
import { NavModeSelector } from './components/NavModeSelector.jsx';
const TABS = [
{ id: 'status', label: 'Status', icon: '⬤' },
{ id: 'faces', label: 'Faces', icon: '◉' },
{ id: 'conversation', label: 'Conversation', icon: '◌' },
{ id: 'personality', label: 'Personality', icon: '◈' },
{ id: 'navigation', label: 'Navigation', icon: '◫' },
];
const DEFAULT_WS_URL = 'ws://localhost:9090';
/**
 * ConnectionBar — rosbridge URL display / inline editor with a status dot.
 *
 * Props:
 *   url       – current rosbridge websocket URL
 *   setUrl    – called with the new URL when the user clicks "Connect"
 *   connected – true when the websocket is connected
 *   error     – error string when the connection failed (falsy otherwise)
 *
 * Clicking the URL opens an inline editor; Enter applies, Escape cancels.
 * NOTE(review): the cancel button renders an empty label — the original
 * glyph (likely "✕") appears lost to encoding; confirm against history.
 */
function ConnectionBar({ url, setUrl, connected, error }) {
  const [editing, setEditing] = useState(false);
  const [draft, setDraft] = useState(url);

  // Commit the drafted URL (triggers a reconnect upstream) and exit editing.
  const handleApply = () => {
    setUrl(draft);
    setEditing(false);
  };

  return (
    <div className="flex items-center gap-2 text-xs">
      {/* Connection dot */}
      <div className={`w-2 h-2 rounded-full shrink-0 ${
        connected ? 'bg-green-400' : error ? 'bg-red-500' : 'bg-gray-600'
      }`} />
      {editing ? (
        <div className="flex items-center gap-1">
          <input
            value={draft}
            onChange={(e) => setDraft(e.target.value)}
            onKeyDown={(e) => { if (e.key === 'Enter') handleApply(); if (e.key === 'Escape') setEditing(false); }}
            autoFocus
            className="bg-gray-900 border border-cyan-800 rounded px-2 py-0.5 text-cyan-300 w-52 focus:outline-none"
          />
          <button onClick={handleApply} className="px-2 py-0.5 rounded bg-cyan-950 border border-cyan-700 text-cyan-400 hover:bg-cyan-900">Connect</button>
          <button onClick={() => setEditing(false)} className="text-gray-600 hover:text-gray-400 px-1"></button>
        </div>
      ) : (
        <button
          onClick={() => { setDraft(url); setEditing(true); }}
          className="text-gray-500 hover:text-cyan-400 transition-colors truncate max-w-40"
          title={url}
        >
          {connected ? (
            <span className="text-green-400">rosbridge: {url}</span>
          ) : error ? (
            <span className="text-red-400" title={error}> {url}</span>
          ) : (
            <span className="text-gray-500">{url} (connecting)</span>
          )}
        </button>
      )}
    </div>
  );
}
/**
 * App — dashboard root: top bar (brand + ConnectionBar), tab navigation,
 * the active tab's panel, and a connection-status footer.
 *
 * All ROS interaction flows through useRosbridge(wsUrl); each tab panel
 * receives only the capabilities it needs (subscribe / publish /
 * callService / setParam). Changing the URL via ConnectionBar re-runs
 * the hook against the new endpoint.
 */
export default function App() {
  const [wsUrl, setWsUrl] = useState(DEFAULT_WS_URL);
  const [activeTab, setActiveTab] = useState('status');
  const { connected, error, subscribe, publish, callService, setParam } = useRosbridge(wsUrl);

  // Memoized publish for NavModeSelector (avoids recreating on every render)
  const publishFn = useCallback(
    (name, type, data) => publish(name, type, data),
    [publish]
  );

  return (
    <div className="min-h-screen flex flex-col bg-[#050510] text-gray-300 font-mono">
      {/* ── Top Bar ── */}
      <header className="flex items-center justify-between px-4 py-2 bg-[#070712] border-b border-cyan-950 shrink-0 gap-2 flex-wrap">
        <div className="flex items-center gap-3">
          <span className="text-orange-500 font-bold tracking-widest text-sm"> SALTYBOT</span>
          <span className="text-cyan-800 text-xs hidden sm:inline">SOCIAL DASHBOARD</span>
        </div>
        <ConnectionBar
          url={wsUrl}
          setUrl={setWsUrl}
          connected={connected}
          error={error}
        />
      </header>
      {/* ── Tab Nav ── */}
      <nav className="bg-[#070712] border-b border-cyan-950 shrink-0">
        <div className="flex overflow-x-auto">
          {TABS.map((tab) => (
            <button
              key={tab.id}
              onClick={() => setActiveTab(tab.id)}
              className={`flex items-center gap-1.5 px-4 py-2.5 text-xs font-bold tracking-widest whitespace-nowrap border-b-2 transition-colors ${
                activeTab === tab.id
                  ? 'border-cyan-500 text-cyan-300 bg-cyan-950 bg-opacity-30'
                  : 'border-transparent text-gray-500 hover:text-gray-300 hover:border-gray-700'
              }`}
            >
              <span className="hidden sm:inline text-base leading-none">{tab.icon}</span>
              {tab.label.toUpperCase()}
            </button>
          ))}
        </div>
      </nav>
      {/* ── Content ── */}
      <main className="flex-1 overflow-y-auto p-4">
        {activeTab === 'status' && (
          <StatusPanel subscribe={subscribe} />
        )}
        {activeTab === 'faces' && (
          <FaceGallery
            subscribe={subscribe}
            callService={callService}
          />
        )}
        {activeTab === 'conversation' && (
          <ConversationLog subscribe={subscribe} />
        )}
        {activeTab === 'personality' && (
          <PersonalityTuner
            subscribe={subscribe}
            setParam={setParam}
          />
        )}
        {activeTab === 'navigation' && (
          <NavModeSelector
            subscribe={subscribe}
            publish={publishFn}
          />
        )}
      </main>
      {/* ── Footer ── */}
      <footer className="bg-[#070712] border-t border-cyan-950 px-4 py-1.5 flex items-center justify-between text-xs text-gray-700 shrink-0">
        <span>rosbridge: <code className="text-gray-600">{wsUrl}</code></span>
        <span className={connected ? 'text-green-700' : 'text-red-900'}>
          {connected ? 'CONNECTED' : 'DISCONNECTED'}
        </span>
      </footer>
    </div>
  );
}

View File

@ -1,221 +0,0 @@
/**
* ConversationLog.jsx Real-time transcript with speaker labels.
*
* Subscribes:
* /social/speech/transcript (SpeechTranscript) human utterances
* /social/conversation/response (ConversationResponse) bot replies
*/
import { useEffect, useRef, useState } from 'react';
const MAX_ENTRIES = 200;
/** Format an epoch-ms timestamp as a locale HH:MM:SS string. */
function formatTime(ts) {
  const opts = { hour: '2-digit', minute: '2-digit', second: '2-digit' };
  return new Date(ts).toLocaleTimeString([], opts);
}
/**
 * HumanBubble — right-aligned transcript bubble for a human utterance.
 *
 * entry fields read: ts (epoch ms), speaker (id string, may be falsy),
 * confidence (0–1 or null), partial (bool), text. A partial entry gets
 * an italic "partial" tag and the bubble-partial style while ASR is
 * still streaming.
 */
function HumanBubble({ entry }) {
  return (
    <div className="flex flex-col items-end gap-0.5">
      <div className="flex items-center gap-2 text-xs text-gray-500">
        <span className="text-xs">{formatTime(entry.ts)}</span>
        <span className="text-blue-400 font-bold">{entry.speaker || 'unknown'}</span>
        {entry.confidence != null && (
          <span className="text-gray-600">({Math.round(entry.confidence * 100)}%)</span>
        )}
        {entry.partial && <span className="text-amber-600 text-xs italic">partial</span>}
      </div>
      <div className={`max-w-xs sm:max-w-md bubble-human rounded-lg px-3 py-2 text-sm text-blue-100 ${entry.partial ? 'bubble-partial' : ''}`}>
        {entry.text}
      </div>
    </div>
  );
}
/**
 * BotBubble — left-aligned transcript bubble for a bot reply.
 *
 * entry fields read: ts, speaker (addressee id, optional), partial (bool),
 * text. Partial entries are tagged "streaming" while the reply is still
 * being generated.
 * NOTE(review): the span before {entry.speaker} starts with a bare space —
 * a separator glyph (e.g. "→") may have been lost to encoding; confirm.
 */
function BotBubble({ entry }) {
  return (
    <div className="flex flex-col items-start gap-0.5">
      <div className="flex items-center gap-2 text-xs text-gray-500">
        <span className="text-teal-400 font-bold">Salty</span>
        {entry.speaker && <span className="text-gray-600"> {entry.speaker}</span>}
        <span className="text-xs">{formatTime(entry.ts)}</span>
        {entry.partial && <span className="text-amber-600 text-xs italic">streaming</span>}
      </div>
      <div className={`max-w-xs sm:max-w-md bubble-bot rounded-lg px-3 py-2 text-sm text-teal-100 ${entry.partial ? 'bubble-partial' : ''}`}>
        {entry.text}
      </div>
    </div>
  );
}
export function ConversationLog({ subscribe }) {
const [entries, setEntries] = useState([]);
const [autoScroll, setAutoScroll] = useState(true);
const bottomRef = useRef(null);
const scrollRef = useRef(null);
// Auto-scroll to bottom when new entries arrive
useEffect(() => {
if (autoScroll && bottomRef.current) {
bottomRef.current.scrollIntoView({ behavior: 'smooth' });
}
}, [entries, autoScroll]);
// Human transcript
useEffect(() => {
const unsub = subscribe(
'/social/speech/transcript',
'saltybot_social_msgs/SpeechTranscript',
(msg) => {
setEntries((prev) => {
const entry = {
id: `h-${msg.header?.stamp?.sec ?? Date.now()}-${msg.turn_id ?? Math.random()}`,
type: 'human',
text: msg.text,
speaker: msg.speaker_id,
confidence: msg.confidence,
partial: msg.is_partial,
ts: Date.now(),
};
// Replace partial entry with same turn if exists
if (msg.is_partial) {
const idx = prev.findLastIndex(
(e) => e.type === 'human' && e.partial && e.speaker === msg.speaker_id
);
if (idx !== -1) {
const updated = [...prev];
updated[idx] = entry;
return updated;
}
} else {
// Replace any trailing partial for same speaker
const idx = prev.findLastIndex(
(e) => e.type === 'human' && e.partial && e.speaker === msg.speaker_id
);
if (idx !== -1) {
const updated = [...prev];
updated[idx] = { ...entry, id: prev[idx].id };
return updated.slice(-MAX_ENTRIES);
}
}
return [...prev, entry].slice(-MAX_ENTRIES);
});
}
);
return unsub;
}, [subscribe]);
// Bot response
useEffect(() => {
const unsub = subscribe(
'/social/conversation/response',
'saltybot_social_msgs/ConversationResponse',
(msg) => {
setEntries((prev) => {
const entry = {
id: `b-${msg.turn_id ?? Math.random()}`,
type: 'bot',
text: msg.text,
speaker: msg.speaker_id,
partial: msg.is_partial,
turnId: msg.turn_id,
ts: Date.now(),
};
// Replace streaming partial with same turn_id
if (msg.turn_id != null) {
const idx = prev.findLastIndex(
(e) => e.type === 'bot' && e.turnId === msg.turn_id
);
if (idx !== -1) {
const updated = [...prev];
updated[idx] = entry;
return updated;
}
}
return [...prev, entry].slice(-MAX_ENTRIES);
});
}
);
return unsub;
}, [subscribe]);
const handleScroll = () => {
const el = scrollRef.current;
if (!el) return;
const atBottom = el.scrollHeight - el.scrollTop - el.clientHeight < 60;
setAutoScroll(atBottom);
};
const handleClear = () => setEntries([]);
return (
<div className="flex flex-col h-full" style={{ minHeight: '400px', maxHeight: '70vh' }}>
{/* Toolbar */}
<div className="flex items-center justify-between mb-2 shrink-0">
<div className="text-cyan-700 text-xs font-bold tracking-widest">
CONVERSATION LOG ({entries.length})
</div>
<div className="flex items-center gap-2">
<label className="flex items-center gap-1 cursor-pointer">
<input
type="checkbox"
checked={autoScroll}
onChange={(e) => setAutoScroll(e.target.checked)}
className="accent-cyan-500 w-3 h-3"
/>
<span className="text-gray-500 text-xs">Auto-scroll</span>
</label>
<button
onClick={handleClear}
className="px-2 py-0.5 rounded border border-gray-700 text-gray-500 hover:text-gray-300 text-xs"
>
Clear
</button>
</div>
</div>
{/* Scroll container */}
<div
ref={scrollRef}
onScroll={handleScroll}
className="flex-1 overflow-y-auto space-y-3 pr-1"
style={{ minHeight: '300px' }}
>
{entries.length === 0 ? (
<div className="text-gray-600 text-sm text-center py-12 border border-dashed border-gray-800 rounded-lg">
Waiting for conversation
</div>
) : (
entries.map((entry) =>
entry.type === 'human' ? (
<HumanBubble key={entry.id} entry={entry} />
) : (
<BotBubble key={entry.id} entry={entry} />
)
)
)}
<div ref={bottomRef} />
</div>
{/* Legend */}
<div className="flex gap-4 mt-2 pt-2 border-t border-gray-900 shrink-0">
<div className="flex items-center gap-1.5">
<div className="w-3 h-3 rounded-sm bg-blue-950 border border-blue-700" />
<span className="text-gray-600 text-xs">Human</span>
</div>
<div className="flex items-center gap-1.5">
<div className="w-3 h-3 rounded-sm bg-teal-950 border border-teal-700" />
<span className="text-gray-600 text-xs">Salty</span>
</div>
<div className="flex items-center gap-1.5">
<div className="w-2 h-2 rounded-full bg-amber-600 opacity-60" />
<span className="text-gray-600 text-xs">Streaming</span>
</div>
</div>
</div>
);
}

View File

@ -1,237 +0,0 @@
/**
 * FaceGallery.jsx — Enrolled person management.
 *
 * Services:
 *   /social/enrollment/list_persons   (ListPersons.srv)
 *   /social/enrollment/enroll_person  (EnrollPerson.srv)
 *   /social/enrollment/delete_person  (DeletePerson.srv)
 *
 * Topics (live detections):
 *   /social/face_detections (FaceDetectionArray) — optional, shows live bboxes
 */
import { useEffect, useState, useCallback } from 'react';
/**
 * PersonCard — one enrolled person in the gallery grid.
 *
 * Props:
 *   person        — { person_id, person_name, sample_count, enrolled_at? }
 *   onDelete(id)  — async delete handler supplied by FaceGallery.
 *   liveDetection — current face detection whose face_id matched this person,
 *                   or undefined; used to highlight the card green.
 */
function PersonCard({ person, onDelete, liveDetection }) {
  const [deleting, setDeleting] = useState(false);
  // enrolled_at is a ROS time (sec/nanosec); only the date portion is shown.
  const enrolledAt = person.enrolled_at
    ? new Date(person.enrolled_at.sec * 1000).toLocaleDateString()
    : '—';
  const handleDelete = async () => {
    // Native confirm dialog guards against accidental deletion.
    if (!confirm(`Delete ${person.person_name}?`)) return;
    setDeleting(true);
    try {
      await onDelete(person.person_id);
    } finally {
      setDeleting(false);
    }
  };
  const isActive = liveDetection?.face_id === person.person_id;
  return (
    <div className={`bg-gray-900 rounded-lg border p-3 flex flex-col gap-2 transition-colors ${
      isActive ? 'border-green-600 bg-green-950' : 'border-gray-800'
    }`}>
      {/* Avatar placeholder */}
      <div className="w-full aspect-square rounded bg-gray-800 border border-gray-700 flex items-center justify-center relative overflow-hidden">
        <svg className="w-12 h-12 text-gray-600" fill="currentColor" viewBox="0 0 24 24">
          <path d="M12 12c2.7 0 4.8-2.1 4.8-4.8S14.7 2.4 12 2.4 7.2 4.5 7.2 7.2 9.3 12 12 12zm0 2.4c-3.2 0-9.6 1.6-9.6 4.8v2.4h19.2v-2.4c0-3.2-6.4-4.8-9.6-4.8z"/>
        </svg>
        {isActive && (
          <div className="absolute inset-0 border-2 border-green-500 rounded pointer-events-none" />
        )}
        <div className="absolute top-1 right-1">
          <div className={`w-2 h-2 rounded-full ${isActive ? 'bg-green-400' : 'bg-gray-700'}`} />
        </div>
      </div>
      {/* Info */}
      <div>
        <div className="text-cyan-300 text-sm font-bold truncate">{person.person_name}</div>
        <div className="text-gray-600 text-xs">ID: {person.person_id}</div>
        <div className="text-gray-600 text-xs">Samples: {person.sample_count}</div>
        <div className="text-gray-600 text-xs">Enrolled: {enrolledAt}</div>
      </div>
      {/* Delete */}
      <button
        onClick={handleDelete}
        disabled={deleting}
        className="w-full py-1 text-xs rounded border border-red-900 text-red-400 hover:bg-red-950 hover:border-red-700 disabled:opacity-40 transition-colors"
      >
        {deleting ? 'Deleting…' : 'Delete'}
      </button>
    </div>
  );
}
/**
 * FaceGallery — enroll, list, and delete persons via the enrollment services,
 * with optional live-detection highlighting of enrolled cards.
 *
 * Props:
 *   subscribe(topic, type, cb) -> unsubscribe — rosbridge subscription helper.
 *   callService(name, type, req) -> Promise   — rosbridge service helper.
 */
export function FaceGallery({ subscribe, callService }) {
  const [persons, setPersons] = useState([]);
  const [loading, setLoading] = useState(false);
  const [error, setError] = useState(null);
  const [enrolling, setEnrolling] = useState(false);
  const [newName, setNewName] = useState('');
  const [newSamples, setNewSamples] = useState(5);
  const [liveDetections, setLiveDetections] = useState([]);
  // Fetch the enrolled-person list; errors are surfaced in the error banner.
  const loadPersons = useCallback(async () => {
    setLoading(true);
    setError(null);
    try {
      const result = await callService(
        '/social/enrollment/list_persons',
        'saltybot_social_msgs/srv/ListPersons',
        {}
      );
      setPersons(result?.persons ?? []);
    } catch (e) {
      setError('Service unavailable: ' + (e?.message ?? e));
    } finally {
      setLoading(false);
    }
  }, [callService]);
  // Initial load (and reload if callService identity changes).
  useEffect(() => {
    loadPersons();
  }, [loadPersons]);
  // Live face detections
  useEffect(() => {
    const unsub = subscribe(
      '/social/face_detections',
      'saltybot_social_msgs/FaceDetectionArray',
      (msg) => setLiveDetections(msg.faces ?? [])
    );
    return unsub;
  }, [subscribe]);
  // Capture-mode enrollment; on success the name field is cleared and the
  // gallery refreshed. Service-reported failure goes to the error banner.
  const handleEnroll = async () => {
    if (!newName.trim()) return;
    setEnrolling(true);
    setError(null);
    try {
      const result = await callService(
        '/social/enrollment/enroll_person',
        'saltybot_social_msgs/srv/EnrollPerson',
        { name: newName.trim(), mode: 'capture', n_samples: newSamples }
      );
      if (result?.success) {
        setNewName('');
        await loadPersons();
      } else {
        setError(result?.message ?? 'Enrollment failed');
      }
    } catch (e) {
      setError('Enroll error: ' + (e?.message ?? e));
    } finally {
      setEnrolling(false);
    }
  };
  // Delete by id, then refresh the gallery.
  const handleDelete = async (personId) => {
    try {
      await callService(
        '/social/enrollment/delete_person',
        'saltybot_social_msgs/srv/DeletePerson',
        { person_id: personId }
      );
      await loadPersons();
    } catch (e) {
      setError('Delete error: ' + (e?.message ?? e));
    }
  };
  return (
    <div className="space-y-4">
      {/* Enroll new person */}
      <div className="bg-gray-950 rounded-lg border border-cyan-950 p-4">
        <div className="text-cyan-700 text-xs font-bold tracking-widest mb-3">ENROLL NEW PERSON</div>
        <div className="flex flex-col sm:flex-row gap-2">
          <input
            type="text"
            placeholder="Person name…"
            value={newName}
            onChange={(e) => setNewName(e.target.value)}
            onKeyDown={(e) => e.key === 'Enter' && handleEnroll()}
            className="flex-1 bg-gray-900 border border-gray-700 rounded px-3 py-1.5 text-sm text-gray-200 placeholder-gray-600 focus:outline-none focus:border-cyan-700"
          />
          <div className="flex items-center gap-2">
            <span className="text-gray-500 text-xs shrink-0">Samples:</span>
            <input
              type="number"
              min={1}
              max={20}
              value={newSamples}
              onChange={(e) => setNewSamples(Number(e.target.value))}
              className="w-16 bg-gray-900 border border-gray-700 rounded px-2 py-1.5 text-sm text-gray-200 focus:outline-none focus:border-cyan-700"
            />
          </div>
          <button
            onClick={handleEnroll}
            disabled={enrolling || !newName.trim()}
            className="px-4 py-1.5 rounded bg-cyan-950 hover:bg-cyan-900 border border-cyan-700 text-cyan-300 text-sm disabled:opacity-40 transition-colors"
          >
            {enrolling ? 'Capturing…' : 'Enroll'}
          </button>
        </div>
        {enrolling && (
          <div className="mt-2 text-amber-400 text-xs animate-pulse">
            Capturing face samples look at the camera
          </div>
        )}
      </div>
      {/* Error */}
      {error && (
        <div className="bg-red-950 border border-red-800 rounded px-3 py-2 text-red-300 text-xs">
          {error}
        </div>
      )}
      {/* Gallery header */}
      <div className="flex items-center justify-between">
        <div className="text-cyan-700 text-xs font-bold tracking-widest">
          ENROLLED PERSONS ({persons.length})
        </div>
        <div className="flex items-center gap-2">
          {liveDetections.length > 0 && (
            <div className="flex items-center gap-1 text-green-400 text-xs">
              <div className="w-1.5 h-1.5 rounded-full bg-green-400 animate-pulse" />
              {liveDetections.length} detected
            </div>
          )}
          <button
            onClick={loadPersons}
            disabled={loading}
            className="px-2 py-1 rounded border border-gray-700 text-gray-400 hover:text-gray-200 text-xs disabled:opacity-40 transition-colors"
          >
            {loading ? '…' : 'Refresh'}
          </button>
        </div>
      </div>
      {/* Grid */}
      {loading && persons.length === 0 ? (
        <div className="text-gray-600 text-sm text-center py-8">Loading</div>
      ) : persons.length === 0 ? (
        <div className="text-gray-600 text-sm text-center py-8 border border-dashed border-gray-800 rounded-lg">
          No enrolled persons
        </div>
      ) : (
        <div className="grid grid-cols-2 sm:grid-cols-3 lg:grid-cols-4 gap-3">
          {persons.map((p) => (
            <PersonCard
              key={p.person_id}
              person={p}
              onDelete={handleDelete}
              liveDetection={liveDetections.find(d => d.face_id === p.person_id)}
            />
          ))}
        </div>
      )}
    </div>
  );
}

View File

@ -1,176 +0,0 @@
/**
 * NavModeSelector.jsx — Follow mode switcher.
 *
 * Publishes:   /social/nav/mode   (std_msgs/String)
 * Subscribes:  /social/nav/mode   (std_msgs/String) — echoed back by social_nav_node
 *              /social/nav/status (std_msgs/String) — freeform status string
 */
import { useEffect, useState } from 'react';
// Follow-mode catalog. `id` is the value published on /social/nav/mode;
// `color`/`activeColor` are Tailwind class sets for inactive/active buttons.
const MODES = [
  {
    id: 'shadow',
    label: 'SHADOW',
    icon: '👤',
    description: 'Follow directly behind at distance',
    color: 'border-blue-700 text-blue-300 bg-blue-950',
    activeColor: 'border-blue-400 text-blue-100 bg-blue-900 mode-active',
  },
  {
    id: 'lead',
    label: 'LEAD',
    icon: '➡',
    description: 'Robot moves ahead, person follows',
    color: 'border-green-700 text-green-300 bg-green-950',
    activeColor: 'border-green-400 text-green-100 bg-green-900 mode-active',
  },
  {
    id: 'side',
    label: 'SIDE',
    icon: '↔',
    description: 'Walk side-by-side',
    color: 'border-purple-700 text-purple-300 bg-purple-950',
    activeColor: 'border-purple-400 text-purple-100 bg-purple-900 mode-active',
  },
  {
    id: 'orbit',
    label: 'ORBIT',
    icon: '⟳',
    description: 'Circle around the tracked person',
    color: 'border-amber-700 text-amber-300 bg-amber-950',
    activeColor: 'border-amber-400 text-amber-100 bg-amber-900 mode-active',
  },
  {
    id: 'loose',
    label: 'LOOSE',
    icon: '⬡',
    description: 'Follow with generous spacing',
    color: 'border-teal-700 text-teal-300 bg-teal-950',
    activeColor: 'border-teal-400 text-teal-100 bg-teal-900 mode-active',
  },
  {
    id: 'tight',
    label: 'TIGHT',
    icon: '⬟',
    description: 'Follow closely, minimal gap',
    color: 'border-red-700 text-red-300 bg-red-950',
    activeColor: 'border-red-400 text-red-100 bg-red-900 mode-active',
  },
];
// Display-only reference of spoken phrases mapped to each mode
// (parsing itself happens in social_nav_node, not in this UI).
const VOICE_COMMANDS = [
  { mode: 'shadow', cmd: '"shadow" / "follow me"' },
  { mode: 'lead', cmd: '"lead me" / "go ahead"' },
  { mode: 'side', cmd: '"stay beside"' },
  { mode: 'orbit', cmd: '"orbit"' },
  { mode: 'loose', cmd: '"give me space"' },
  { mode: 'tight', cmd: '"stay close"' },
];
/**
 * NavModeSelector — follow-mode buttons plus live nav status.
 *
 * Props:
 *   subscribe(topic, type, cb) -> unsubscribe — rosbridge subscription helper.
 *   publish(topic, type, msg)                 — rosbridge publish helper.
 *
 * Mode changes are published on /social/nav/mode and confirmed by the echo
 * from social_nav_node on the same topic.
 */
export function NavModeSelector({ subscribe, publish }) {
  const [activeMode, setActiveMode] = useState(null);
  const [navStatus, setNavStatus] = useState('');
  const [sending, setSending] = useState(null);
  // Subscribe to echoed mode topic
  useEffect(() => {
    const unsub = subscribe(
      '/social/nav/mode',
      'std_msgs/String',
      (msg) => setActiveMode(msg.data)
    );
    return unsub;
  }, [subscribe]);
  // Subscribe to nav status
  useEffect(() => {
    const unsub = subscribe(
      '/social/nav/status',
      'std_msgs/String',
      (msg) => setNavStatus(msg.data)
    );
    return unsub;
  }, [subscribe]);
  // Publish the selected mode and briefly show a "sending" indicator.
  // NOTE(review): declared async but contains no await — harmless; the
  // onClick caller ignores the returned promise.
  const handleMode = async (modeId) => {
    setSending(modeId);
    publish('/social/nav/mode', 'std_msgs/String', { data: modeId });
    // Optimistic update; will be confirmed when echoed back
    setActiveMode(modeId);
    setTimeout(() => setSending(null), 800);
  };
  return (
    <div className="space-y-4">
      {/* Status */}
      <div className="bg-gray-950 rounded-lg border border-cyan-950 p-4">
        <div className="text-cyan-700 text-xs font-bold tracking-widest mb-2">NAV STATUS</div>
        <div className="flex items-center gap-3">
          {activeMode ? (
            <>
              <div className="w-2.5 h-2.5 rounded-full bg-green-400 animate-pulse" />
              <span className="text-gray-300 text-sm">
                Mode: <span className="text-cyan-300 font-bold uppercase">{activeMode}</span>
              </span>
            </>
          ) : (
            <>
              <div className="w-2.5 h-2.5 rounded-full bg-gray-600" />
              <span className="text-gray-600 text-sm">No mode received</span>
            </>
          )}
          {navStatus && (
            <span className="ml-auto text-gray-500 text-xs">{navStatus}</span>
          )}
        </div>
      </div>
      {/* Mode buttons */}
      <div className="bg-gray-950 rounded-lg border border-cyan-950 p-4">
        <div className="text-cyan-700 text-xs font-bold tracking-widest mb-3">FOLLOW MODE</div>
        <div className="grid grid-cols-2 sm:grid-cols-3 gap-2">
          {MODES.map((mode) => {
            const isActive = activeMode === mode.id;
            const isSending = sending === mode.id;
            return (
              <button
                key={mode.id}
                onClick={() => handleMode(mode.id)}
                disabled={isSending}
                title={mode.description}
                className={`flex flex-col items-center gap-1 py-3 px-2 rounded-lg border font-bold text-sm transition-all duration-200 ${
                  isActive ? mode.activeColor : mode.color
                } hover:opacity-90 active:scale-95 disabled:cursor-wait`}
              >
                <span className="text-xl">{mode.icon}</span>
                <span className="tracking-widest text-xs">{mode.label}</span>
                {isSending && <span className="text-xs opacity-60">sending</span>}
              </button>
            );
          })}
        </div>
        <p className="text-gray-600 text-xs mt-3">
          Tap to publish to <code>/social/nav/mode</code>
        </p>
      </div>
      {/* Voice commands reference */}
      <div className="bg-gray-950 rounded-lg border border-cyan-950 p-4">
        <div className="text-cyan-700 text-xs font-bold tracking-widest mb-3">VOICE COMMANDS</div>
        <div className="space-y-1.5">
          {VOICE_COMMANDS.map(({ mode, cmd }) => (
            <div key={mode} className="flex items-center gap-2 text-xs">
              <span className="text-gray-500 uppercase w-14 shrink-0">{mode}</span>
              <span className="text-gray-400 italic">{cmd}</span>
            </div>
          ))}
        </div>
        <p className="text-gray-600 text-xs mt-3">
          Voice commands are parsed by <code>social_nav_node</code> from{' '}
          <code>/social/speech/command</code>.
        </p>
      </div>
    </div>
  );
}

View File

@ -1,228 +0,0 @@
/**
 * PersonalityTuner.jsx — Personality dial controls.
 *
 * Reads current personality from /social/personality/state (PersonalityState).
 * Writes via /personality_node/set_parameters (rcl_interfaces/srv/SetParameters).
 *
 * SOUL.md params controlled:
 *   sass_level  (int, 0-10)
 *   humor_level (int, 0-10)
 *   verbosity   (int, 0-10) — new param, add to SOUL.md + personality_node
 */
import { useEffect, useState } from 'react';
// Dial definitions. `key` indexes the local values object; `param` is the
// rcl_interfaces parameter written to personality_node; `color`/`barColor`
// style the range input and its fill gradient.
const PERSONALITY_DIALS = [
  {
    key: 'sass_level',
    label: 'SASS',
    param: { name: 'sass_level', type: 'integer' },
    min: 0, max: 10,
    leftLabel: 'Polite',
    rightLabel: 'Maximum Sass',
    color: 'accent-orange',
    barColor: '#f97316',
    description: '0 = pure politeness, 10 = maximum sass',
  },
  {
    key: 'humor_level',
    label: 'HUMOR',
    param: { name: 'humor_level', type: 'integer' },
    min: 0, max: 10,
    leftLabel: 'Deadpan',
    rightLabel: 'Comedian',
    color: 'accent-cyan',
    barColor: '#06b6d4',
    description: '0 = deadpan/serious, 10 = comedian',
  },
  {
    key: 'verbosity',
    label: 'VERBOSITY',
    param: { name: 'verbosity', type: 'integer' },
    min: 0, max: 10,
    leftLabel: 'Terse',
    rightLabel: 'Verbose',
    color: 'accent-purple',
    barColor: '#a855f7',
    description: '0 = brief responses, 10 = elaborate explanations',
  },
];
/**
 * DialSlider — one labeled range input for a personality dial.
 *
 * Props:
 *   dial     — entry from PERSONALITY_DIALS (min/max/labels/colors).
 *   value    — current integer value.
 *   onChange — called with the new numeric value.
 */
function DialSlider({ dial, value, onChange }) {
  // Fill percentage for the two-tone track gradient.
  const pct = ((value - dial.min) / (dial.max - dial.min)) * 100;
  return (
    <div className="space-y-1">
      <div className="flex justify-between items-center">
        <span className="text-xs font-bold text-gray-400 tracking-widest">{dial.label}</span>
        <span
          className="text-sm font-bold w-6 text-right"
          style={{ color: dial.barColor }}
        >
          {value}
        </span>
      </div>
      <div className="relative">
        <input
          type="range"
          min={dial.min}
          max={dial.max}
          value={value}
          onChange={(e) => onChange(Number(e.target.value))}
          className={`w-full h-1.5 rounded appearance-none cursor-pointer ${dial.color}`}
          style={{
            background: `linear-gradient(to right, ${dial.barColor} ${pct}%, #1f2937 ${pct}%)`,
          }}
        />
      </div>
      <div className="flex justify-between text-xs text-gray-600">
        <span>{dial.leftLabel}</span>
        <span>{dial.rightLabel}</span>
      </div>
    </div>
  );
}
/**
 * PersonalityTuner — sliders that write personality parameters to
 * personality_node, plus a read-only view of the live PersonalityState.
 *
 * Props:
 *   subscribe(topic, type, cb) -> unsubscribe — rosbridge subscription helper.
 *   setParam(nodeName, params) -> Promise     — SetParameters service helper.
 */
export function PersonalityTuner({ subscribe, setParam }) {
  const [values, setValues] = useState({ sass_level: 4, humor_level: 7, verbosity: 5 });
  const [applied, setApplied] = useState(null); // last-applied snapshot
  const [saving, setSaving] = useState(false);
  const [saveResult, setSaveResult] = useState(null);
  const [personaInfo, setPersonaInfo] = useState(null);
  // Sync with live personality state
  useEffect(() => {
    const unsub = subscribe(
      '/social/personality/state',
      'saltybot_social_msgs/PersonalityState',
      (msg) => {
        setPersonaInfo({
          name: msg.persona_name,
          mood: msg.mood,
          person: msg.person_id,
          tier: msg.relationship_tier,
          greeting: msg.greeting_text,
        });
      }
    );
    return unsub;
  }, [subscribe]);
  // Dirty when the sliders differ from the last-applied snapshot
  // (applied starts null, so the very first Apply is always enabled).
  const isDirty = JSON.stringify(values) !== JSON.stringify(applied);
  // Push all three dials as one SetParameters call; result banner
  // auto-clears after 4s.
  const handleApply = async () => {
    setSaving(true);
    setSaveResult(null);
    try {
      const params = PERSONALITY_DIALS.map((d) => ({
        name: d.param.name,
        type: d.param.type,
        value: values[d.key],
      }));
      await setParam('personality_node', params);
      setApplied({ ...values });
      setSaveResult({ ok: true, msg: 'Parameters applied to personality_node' });
    } catch (e) {
      setSaveResult({ ok: false, msg: e?.message ?? 'Service call failed' });
    } finally {
      setSaving(false);
      setTimeout(() => setSaveResult(null), 4000);
    }
  };
  // Restore the UI defaults (does not publish until Apply is pressed).
  const handleReset = () => {
    setValues({ sass_level: 4, humor_level: 7, verbosity: 5 });
  };
  return (
    <div className="space-y-4">
      {/* Current persona info */}
      {personaInfo && (
        <div className="bg-gray-950 rounded-lg border border-cyan-950 p-4">
          <div className="text-cyan-700 text-xs font-bold tracking-widest mb-2">ACTIVE PERSONA</div>
          <div className="grid grid-cols-2 gap-2 text-xs">
            <div>
              <span className="text-gray-600">Name: </span>
              <span className="text-cyan-300 font-bold">{personaInfo.name || '—'}</span>
            </div>
            <div>
              <span className="text-gray-600">Mood: </span>
              <span className={`font-bold ${
                personaInfo.mood === 'happy' ? 'text-green-400' :
                personaInfo.mood === 'curious' ? 'text-blue-400' :
                personaInfo.mood === 'annoyed' ? 'text-red-400' :
                personaInfo.mood === 'playful' ? 'text-purple-400' :
                'text-gray-400'
              }`}>{personaInfo.mood || '—'}</span>
            </div>
            {personaInfo.person && (
              <>
                <div>
                  <span className="text-gray-600">Talking to: </span>
                  <span className="text-amber-400">{personaInfo.person}</span>
                </div>
                <div>
                  <span className="text-gray-600">Tier: </span>
                  <span className="text-gray-300">{personaInfo.tier || '—'}</span>
                </div>
              </>
            )}
            {personaInfo.greeting && (
              <div className="col-span-2 mt-1 text-gray-400 italic border-t border-gray-800 pt-1">
                &quot;{personaInfo.greeting}&quot;
              </div>
            )}
          </div>
        </div>
      )}
      {/* Personality sliders */}
      <div className="bg-gray-950 rounded-lg border border-cyan-950 p-4 space-y-5">
        <div className="text-cyan-700 text-xs font-bold tracking-widest">PERSONALITY DIALS</div>
        {PERSONALITY_DIALS.map((dial) => (
          <DialSlider
            key={dial.key}
            dial={dial}
            value={values[dial.key]}
            onChange={(v) => setValues((prev) => ({ ...prev, [dial.key]: v }))}
          />
        ))}
        {/* Info */}
        <div className="text-gray-700 text-xs border-t border-gray-900 pt-3">
          Changes call <code className="text-gray-500">/personality_node/set_parameters</code>.
          Hot-reload to SOUL.md happens within reload_interval (default 5s).
        </div>
      </div>
      {/* Action buttons */}
      <div className="flex items-center gap-2">
        <button
          onClick={handleApply}
          disabled={saving || !isDirty}
          className="flex-1 py-2 rounded font-bold text-sm border border-cyan-700 bg-cyan-950 hover:bg-cyan-900 text-cyan-300 disabled:opacity-40 disabled:cursor-not-allowed transition-colors"
        >
          {saving ? 'Applying…' : isDirty ? 'Apply Changes' : 'Up to Date'}
        </button>
        <button
          onClick={handleReset}
          className="px-4 py-2 rounded text-sm border border-gray-700 text-gray-400 hover:text-gray-200 hover:border-gray-600 transition-colors"
        >
          Defaults
        </button>
      </div>
      {/* Save result */}
      {saveResult && (
        <div className={`rounded px-3 py-2 text-xs ${
          saveResult.ok
            ? 'bg-green-950 border border-green-800 text-green-300'
            : 'bg-red-950 border border-red-800 text-red-300'
        }`}>
          {saveResult.msg}
        </div>
      )}
    </div>
  );
}

View File

@ -1,154 +0,0 @@
/**
 * StatusPanel.jsx — Live pipeline status display.
 *
 * Subscribes to /social/orchestrator/state (std_msgs/String, JSON payload):
 *   {
 *     state: "idle"|"listening"|"thinking"|"speaking"|"throttled",
 *     gpu_free_mb: number,
 *     gpu_total_mb: number,
 *     latency: {
 *       wakeword_to_transcript: { mean_ms, p95_ms, n },
 *       transcript_to_llm: { mean_ms, p95_ms, n },
 *       llm_to_tts: { mean_ms, p95_ms, n },
 *       end_to_end: { mean_ms, p95_ms, n }
 *     },
 *     persona_name: string,
 *     active_person: string
 *   }
 */
import { useEffect, useState } from 'react';
// Visual config per orchestrator state; `pulse` is a CSS animation class
// defined in index.css (empty string = no animation for idle).
const STATE_CONFIG = {
  idle: { label: 'IDLE', color: 'text-gray-400', bg: 'bg-gray-800', border: 'border-gray-600', pulse: '' },
  listening: { label: 'LISTENING', color: 'text-blue-300', bg: 'bg-blue-950', border: 'border-blue-600', pulse: 'pulse-blue' },
  thinking: { label: 'THINKING', color: 'text-amber-300', bg: 'bg-amber-950', border: 'border-amber-600', pulse: 'pulse-amber' },
  speaking: { label: 'SPEAKING', color: 'text-green-300', bg: 'bg-green-950', border: 'border-green-600', pulse: 'pulse-green' },
  throttled: { label: 'THROTTLED', color: 'text-red-300', bg: 'bg-red-950', border: 'border-red-600', pulse: 'pulse-amber' },
};
/**
 * LatencyRow — one mean/p95 latency line; hidden until samples exist.
 * Mean is colored amber above 500 ms and red above 1500 ms.
 */
function LatencyRow({ label, stat }) {
  if (!stat || stat.n === 0) return null;
  const warn = stat.mean_ms > 500;
  const crit = stat.mean_ms > 1500;
  const cls = crit ? 'text-red-400' : warn ? 'text-amber-400' : 'text-cyan-400';
  return (
    <div className="flex justify-between items-center py-0.5">
      <span className="text-gray-500 text-xs">{label}</span>
      <div className="text-right">
        <span className={`text-xs font-bold ${cls}`}>{Math.round(stat.mean_ms)}ms</span>
        <span className="text-gray-600 text-xs ml-1">p95:{Math.round(stat.p95_ms)}ms</span>
      </div>
    </div>
  );
}
/**
 * StatusPanel — pipeline state, GPU memory bar, and latency stats driven by
 * the JSON payload on /social/orchestrator/state (schema in file header).
 *
 * Props:
 *   subscribe(topic, type, cb) -> unsubscribe — rosbridge subscription helper.
 */
export function StatusPanel({ subscribe }) {
  const [status, setStatus] = useState(null);
  const [lastUpdate, setLastUpdate] = useState(null);
  useEffect(() => {
    const unsub = subscribe(
      '/social/orchestrator/state',
      'std_msgs/String',
      (msg) => {
        try {
          const data = JSON.parse(msg.data);
          setStatus(data);
          setLastUpdate(Date.now());
        } catch {
          // ignore parse errors
        }
      }
    );
    return unsub;
  }, [subscribe]);
  const state = status?.state ?? 'idle';
  const cfg = STATE_CONFIG[state] ?? STATE_CONFIG.idle;
  // gpu_total_mb defaults to 1 to avoid divide-by-zero; the render below
  // treats total <= 1 as "no GPU data".
  const gpuFree = status?.gpu_free_mb ?? 0;
  const gpuTotal = status?.gpu_total_mb ?? 1;
  const gpuUsed = gpuTotal - gpuFree;
  const gpuPct = Math.round((gpuUsed / gpuTotal) * 100);
  const gpuWarn = gpuPct > 80;
  const lat = status?.latency ?? {};
  // NOTE(review): staleness is only re-evaluated on re-render, i.e. when a
  // new message arrives — a dead topic may not show STALE until something
  // else triggers a render. Confirm whether a timer tick is wanted.
  const stale = lastUpdate && Date.now() - lastUpdate > 5000;
  return (
    <div className="space-y-4">
      {/* Pipeline State */}
      <div className="bg-gray-950 rounded-lg border border-cyan-950 p-4">
        <div className="text-cyan-700 text-xs font-bold tracking-widest mb-3">PIPELINE STATE</div>
        <div className="flex items-center gap-4">
          <div
            className={`w-5 h-5 rounded-full shrink-0 ${cfg.bg} border-2 ${cfg.border} ${cfg.pulse}`}
          />
          <div>
            <div className={`text-2xl font-bold tracking-widest ${cfg.color}`}>{cfg.label}</div>
            {status?.persona_name && (
              <div className="text-gray-500 text-xs mt-0.5">
                Persona: <span className="text-cyan-500">{status.persona_name}</span>
              </div>
            )}
            {status?.active_person && (
              <div className="text-gray-500 text-xs">
                Talking to: <span className="text-amber-400">{status.active_person}</span>
              </div>
            )}
          </div>
          {stale && (
            <div className="ml-auto text-red-500 text-xs">STALE</div>
          )}
          {!status && (
            <div className="ml-auto text-gray-600 text-xs">No signal</div>
          )}
        </div>
      </div>
      {/* GPU Memory */}
      <div className="bg-gray-950 rounded-lg border border-cyan-950 p-4">
        <div className="text-cyan-700 text-xs font-bold tracking-widest mb-3">GPU MEMORY</div>
        {gpuTotal > 1 ? (
          <>
            <div className="flex justify-between text-xs mb-1.5">
              <span className={gpuWarn ? 'text-red-400' : 'text-gray-400'}>
                {Math.round(gpuUsed)} MB used
              </span>
              <span className="text-gray-500">{Math.round(gpuTotal)} MB total</span>
            </div>
            <div className="w-full h-3 bg-gray-900 rounded overflow-hidden border border-gray-800">
              <div
                className="h-full transition-all duration-500 rounded"
                style={{
                  width: `${gpuPct}%`,
                  background: gpuPct > 90 ? '#ef4444' : gpuPct > 75 ? '#f59e0b' : '#06b6d4',
                }}
              />
            </div>
            <div className={`text-xs mt-1 text-right ${gpuWarn ? 'text-amber-400' : 'text-gray-600'}`}>
              {gpuPct}%
            </div>
          </>
        ) : (
          <div className="text-gray-600 text-xs">No GPU data</div>
        )}
      </div>
      {/* Latency */}
      <div className="bg-gray-950 rounded-lg border border-cyan-950 p-4">
        <div className="text-cyan-700 text-xs font-bold tracking-widest mb-3">LATENCY</div>
        {Object.keys(lat).length > 0 ? (
          <div className="divide-y divide-gray-900">
            <LatencyRow label="Wake → Transcript" stat={lat.wakeword_to_transcript} />
            <LatencyRow label="Transcript → LLM" stat={lat.transcript_to_llm} />
            <LatencyRow label="LLM → TTS" stat={lat.llm_to_tts} />
            <LatencyRow label="End-to-End" stat={lat.end_to_end} />
          </div>
        ) : (
          <div className="text-gray-600 text-xs">No latency data yet</div>
        )}
      </div>
    </div>
  );
}

View File

@ -1,116 +0,0 @@
/**
 * useRosbridge.js — React hook for a ROS2 rosbridge_server WebSocket connection.
 *
 * rosbridge_server default: ws://<robot-ip>:9090
 * Provides subscribe/publish/callService helpers bound to the active connection.
 */
import { useState, useEffect, useRef, useCallback } from 'react';
import ROSLIB from 'roslib';
/**
 * useRosbridge — owns a ROSLIB.Ros connection for the given websocket URL and
 * exposes stable helpers for topics, services, and node parameters.
 *
 * Returns { connected, error, subscribe, publish, callService, setParam }.
 * All helpers are memoized with useCallback so they are safe in effect deps.
 */
export function useRosbridge(url) {
  const [connected, setConnected] = useState(false);
  const [error, setError] = useState(null);
  const rosRef = useRef(null);
  const subscribersRef = useRef(new Map()); // topic -> ROSLIB.Topic
  // (Re)connect whenever the URL changes; tear down all subscriptions and the
  // socket on cleanup.
  useEffect(() => {
    if (!url) return;
    const ros = new ROSLIB.Ros({ url });
    rosRef.current = ros;
    ros.on('connection', () => {
      setConnected(true);
      setError(null);
    });
    ros.on('error', (err) => {
      setError(err?.toString() || 'Connection error');
    });
    ros.on('close', () => {
      setConnected(false);
    });
    return () => {
      subscribersRef.current.forEach((topic) => topic.unsubscribe());
      subscribersRef.current.clear();
      ros.close();
      rosRef.current = null;
    };
  }, [url]);
  /** Subscribe to a ROS2 topic. Returns an unsubscribe function. */
  // NOTE(review): if called before the connection effect has run (e.g. from a
  // child component's effect on first mount), rosRef.current is still null and
  // this silently returns a no-op — the subscription is never established.
  // Confirm whether consumers re-subscribe on `connected` changes.
  const subscribe = useCallback((name, messageType, callback) => {
    if (!rosRef.current) return () => {};
    // One live ROSLIB.Topic per (name, type) pair; a re-subscribe replaces
    // the previous one.
    const key = `${name}::${messageType}`;
    if (subscribersRef.current.has(key)) {
      subscribersRef.current.get(key).unsubscribe();
    }
    const topic = new ROSLIB.Topic({
      ros: rosRef.current,
      name,
      messageType,
    });
    topic.subscribe(callback);
    subscribersRef.current.set(key, topic);
    return () => {
      topic.unsubscribe();
      subscribersRef.current.delete(key);
    };
  }, []);
  /** Publish a single message to a ROS2 topic. */
  const publish = useCallback((name, messageType, data) => {
    if (!rosRef.current) return;
    // A fresh Topic handle per publish; roslib advertises lazily.
    const topic = new ROSLIB.Topic({
      ros: rosRef.current,
      name,
      messageType,
    });
    topic.publish(new ROSLIB.Message(data));
  }, []);
  /** Call a ROS2 service. Returns a Promise resolving to the response. */
  const callService = useCallback((name, serviceType, request = {}) => {
    return new Promise((resolve, reject) => {
      if (!rosRef.current) {
        reject(new Error('Not connected'));
        return;
      }
      const svc = new ROSLIB.Service({
        ros: rosRef.current,
        name,
        serviceType,
      });
      svc.callService(new ROSLIB.ServiceRequest(request), resolve, reject);
    });
  }, []);
  /** Set ROS2 node parameters via rcl_interfaces/srv/SetParameters. */
  const setParam = useCallback((nodeName, params) => {
    // params: { name: string, type: 'bool'|'int'|'double'|'string', value: any }[]
    // Type ints follow rcl_interfaces/msg/ParameterType; unknown types fall
    // back to string (4).
    const TYPE_MAP = { bool: 1, integer: 2, double: 3, string: 4, int: 2 };
    const parameters = params.map(({ name, type, value }) => {
      const typeInt = TYPE_MAP[type] ?? 4;
      const valueKey = {
        1: 'bool_value',
        2: 'integer_value',
        3: 'double_value',
        4: 'string_value',
      }[typeInt];
      return { name, value: { type: typeInt, [valueKey]: value } };
    });
    return callService(
      `/${nodeName}/set_parameters`,
      'rcl_interfaces/srv/SetParameters',
      { parameters }
    );
  }, [callService]);
  return { connected, error, subscribe, publish, callService, setParam };
}

View File

@ -1,73 +0,0 @@
/* Global styles for the Salty dashboard: Tailwind layers, terminal-style
   theme, scrollbars, state-pulse animations, and chat-bubble classes. */
@tailwind base;
@tailwind components;
@tailwind utilities;
* {
  box-sizing: border-box;
}
body {
  font-family: 'Courier New', Courier, monospace;
  background: #050510;
  color: #d1d5db;
  margin: 0;
  padding: 0;
  min-height: 100vh;
}
/* Custom scrollbar */
::-webkit-scrollbar {
  width: 4px;
  height: 4px;
}
::-webkit-scrollbar-track {
  background: #020208;
}
::-webkit-scrollbar-thumb {
  background: #1a3a4a;
  border-radius: 2px;
}
::-webkit-scrollbar-thumb:hover {
  background: #00b8d9;
}
/* Pulse animations for pipeline state (used via STATE_CONFIG.pulse) */
@keyframes pulse-blue {
  0%, 100% { box-shadow: 0 0 0 0 rgba(59, 130, 246, 0.6); }
  50% { box-shadow: 0 0 0 8px rgba(59, 130, 246, 0); }
}
@keyframes pulse-amber {
  0%, 100% { box-shadow: 0 0 0 0 rgba(245, 158, 11, 0.6); }
  50% { box-shadow: 0 0 0 8px rgba(245, 158, 11, 0); }
}
@keyframes pulse-green {
  0%, 100% { box-shadow: 0 0 0 0 rgba(34, 197, 94, 0.6); }
  50% { box-shadow: 0 0 0 8px rgba(34, 197, 94, 0); }
}
.pulse-blue { animation: pulse-blue 1.2s infinite; }
.pulse-amber { animation: pulse-amber 0.8s infinite; }
.pulse-green { animation: pulse-green 0.6s infinite; }
/* Conversation bubbles */
.bubble-human {
  background: #1e3a5f;
  border: 1px solid #2d6a9f;
}
.bubble-bot {
  background: #0d3030;
  border: 1px solid #0d9488;
}
/* Dims in-progress (streaming/partial) bubbles */
.bubble-partial {
  opacity: 0.7;
}
/* Slider accent colors */
input[type='range'].accent-cyan { accent-color: #00b8d9; }
input[type='range'].accent-orange { accent-color: #f97316; }
input[type='range'].accent-purple { accent-color: #a855f7; }
/* Mode button active glow */
.mode-active {
  box-shadow: 0 0 10px rgba(0, 184, 217, 0.4);
}
View File

@ -1,10 +0,0 @@
// Entry point — mounts the dashboard App under React.StrictMode.
import React from 'react';
import ReactDOM from 'react-dom/client';
import App from './App.jsx';
import './index.css';
ReactDOM.createRoot(document.getElementById('root')).render(
  <React.StrictMode>
    <App />
  </React.StrictMode>
);

View File

@ -1,12 +0,0 @@
/** @type {import('tailwindcss').Config} */
export default {
  // Scan the HTML shell and all JS/JSX sources for class names.
  content: ['./index.html', './src/**/*.{js,jsx}'],
  theme: {
    extend: {
      // Terminal look: monospace stack matching the body font in index.css.
      fontFamily: {
        mono: ['"Courier New"', 'Courier', 'monospace'],
      },
    },
  },
  plugins: [],
};

View File

@ -1,14 +0,0 @@
// Vite config — dev server and preview both bind all interfaces (host: true)
// on port 8080 so the dashboard is reachable from the robot's LAN.
import { defineConfig } from 'vite';
import react from '@vitejs/plugin-react';
export default defineConfig({
  plugins: [react()],
  server: {
    port: 8080,
    host: true,
  },
  preview: {
    port: 8080,
    host: true,
  },
});