Configure Jetson Orin with Tailscale client connecting to Headscale coordination server at tailscale.vayrette.com:8180. Device registers as 'saltylab-orin' with persistent auth key for unattended login. Features: - systemd auto-start and restart on WiFi drops - Persistent auth key storage at /opt/saltybot/tailscale-auth.key - SSH + HTTP access over Tailscale tailnet (encrypted WireGuard) - IP forwarding enabled for relay/exit node capability - WiFi resilience with aggressive restart policy - MQTT reporting of VPN status, IP, and connection type Components added: - jetson/scripts/setup-tailscale.sh: Tailscale package installation - jetson/scripts/headscale-auth-helper.sh: Auth key management utility - jetson/systemd/tailscale-vpn.service: systemd service unit - jetson/docs/headscale-vpn-setup.md: Comprehensive setup documentation - saltybot_cellular/vpn_status_node.py: ROS2 node for MQTT reporting Updated: - jetson/systemd/install_systemd.sh: Include tailscale-vpn.service - jetson/scripts/setup-jetson.sh: Add Tailscale setup steps Access patterns: - SSH: ssh user@saltylab-orin.tail12345.ts.net - HTTP: http://saltylab-orin.tail12345.ts.net:port - Direct IP: 100.x.x.x (Tailscale allocated address) Co-Authored-By: Claude Haiku 4.5 <noreply@anthropic.com>
216 lines
9.2 KiB
Bash
216 lines
9.2 KiB
Bash
#!/usr/bin/env bash
# Jetson Orin Nano Super host setup script
# Run once on fresh JetPack 6 installation (Ubuntu 22.04)
# Usage: sudo bash setup-jetson.sh

set -euo pipefail

# Later steps call apt/usermod/chown and read $SUDO_USER; with `set -u` an
# unset SUDO_USER would abort the script mid-run with a confusing error, so
# fail fast here with a clear message instead.
if [[ $EUID -ne 0 ]]; then
  echo "ERROR: Must run as root: sudo bash $0" >&2
  exit 1
fi
if [[ -z "${SUDO_USER:-}" ]]; then
  echo "WARNING: SUDO_USER is not set (running as root directly?)." >&2
  echo "         Group membership / ownership steps will be skipped." >&2
fi

echo "=== Jetson Orin Nano Super Host Setup — saltybot ==="
echo "JetPack 6.x / L4T R36.x / Ubuntu 22.04 expected"
# ── Verify we're on Jetson Orin ───────────────────────────────────────────────
# Jetson modules are aarch64; refuse to run on any other architecture.
host_arch=$(uname -m)
if [[ "$host_arch" != *aarch64* ]]; then
  echo "ERROR: Must run on Jetson (aarch64). Got: $host_arch"
  exit 1
fi
# Warn (with an interactive override) when the installed L4T release is not
# the JetPack 6 / R36 line this script is tuned for.
if [ -f /etc/nv_tegra_release ]; then
  # First line looks like "# R36 (release), REVISION: ...". The trailing
  # `|| true` keeps `set -o pipefail` from aborting the whole script when the
  # file has an unexpected format (grep matching nothing fails the pipeline).
  L4T_VER=$(head -1 /etc/nv_tegra_release | grep -o 'R[0-9]*' | head -1 || true)
  echo "[i] Detected L4T: $L4T_VER"
  if [[ "$L4T_VER" != "R36" ]]; then
    echo "WARNING: Expected L4T R36 (JetPack 6). Got $L4T_VER"
    echo " This script is tuned for Orin Nano Super / JetPack 6."
    read -rp "Continue anyway? [y/N] " ans
    # ${ans,,} lowercases the reply; anything but "y" aborts.
    [[ "${ans,,}" == "y" ]] || exit 1
  fi
fi
# ── System update ─────────────────────────────────────────────────────────────
apt-get update && apt-get upgrade -y

# ── Install Docker + NVIDIA Container Toolkit ─────────────────────────────────
if ! command -v docker &>/dev/null; then
  echo "[+] Installing Docker..."
  # Docker's official convenience installer.
  curl -fsSL https://get.docker.com | sh
  # Let the invoking user run docker without sudo. ${SUDO_USER:-} guards
  # against `set -u` aborting when the script was run as root directly.
  if [[ -n "${SUDO_USER:-}" ]]; then
    usermod -aG docker "$SUDO_USER"
  else
    echo "[!] SUDO_USER not set — skipping 'usermod -aG docker'." >&2
  fi
fi
# NVIDIA Container Toolkit (JetPack 6 method — replaces legacy nvidia-docker2)
# dpkg-query is a direct installed-state check; the previous `dpkg -l | grep -q`
# form could misreport under `set -o pipefail` when grep -q closes the pipe
# early (dpkg then dies with SIGPIPE and the pipeline returns non-zero).
if ! dpkg-query -W -f='${Status}' nvidia-container-toolkit 2>/dev/null \
    | grep -q "ok installed"; then
  echo "[+] Installing NVIDIA Container Toolkit..."
  # e.g. "ubuntu22.04", derived from os-release in a subshell.
  distribution=$(. /etc/os-release; echo "${ID}${VERSION_ID}")
  # JetPack 6 / Ubuntu 22.04 uses the new toolkit keyring.
  curl -fsSL https://nvidia.github.io/libnvidia-container/gpgkey \
    | gpg --dearmor -o /usr/share/keyrings/nvidia-container-toolkit-keyring.gpg
  # -f makes curl fail on HTTP errors so a 404 page is never written into
  # the apt sources list.
  curl -fsSL "https://nvidia.github.io/libnvidia-container/$distribution/libnvidia-container.list" \
    | sed 's#deb https://#deb [signed-by=/usr/share/keyrings/nvidia-container-toolkit-keyring.gpg] https://#g' \
    > /etc/apt/sources.list.d/nvidia-container-toolkit.list
  apt-get update
  apt-get install -y nvidia-container-toolkit
  # Registers the nvidia runtime in Docker's daemon configuration.
  nvidia-ctk runtime configure --runtime=docker
fi
# Configure Docker daemon for NVIDIA runtime + NVMe data root
# NOTE(review): this overwrites any existing daemon.json (including what
# `nvidia-ctk runtime configure` wrote above), and it runs BEFORE the NVMe
# partition is mounted later in this script. If no NVMe is present,
# "data-root" points at an unmounted /mnt/nvme and Docker will store its data
# on the root filesystem beneath that path — confirm this ordering is intended.
cat > /etc/docker/daemon.json << 'EOF'
{
"runtimes": {
"nvidia": {
"path": "nvidia-container-runtime",
"runtimeArgs": []
}
},
"default-runtime": "nvidia",
"data-root": "/mnt/nvme/docker"
}
EOF
# Restart so the new default runtime and data-root take effect.
systemctl restart docker
# ── Set Jetson power mode to MAXN 25W ─────────────────────────────────────────
echo "[+] Setting MAXN 25W power mode (Orin Nano Super)..."
# NOTE(review): nvpmodel mode IDs are defined by the board's installed
# /etc/nvpmodel.conf — confirm mode 0 is the MAXN 25W profile on this image.
nvpmodel -m 0
# Lock clocks at maximum for consistent latency/throughput.
jetson_clocks
# ── NVMe SSD setup ────────────────────────────────────────────────────────────
# Partition/format the first NVMe device (first run only), mount it at
# /mnt/nvme via fstab, and create the saltybot data directories on it.
echo "[+] Setting up NVMe SSD..."
if lsblk | grep -q nvme; then
  NVME_DEV=$(lsblk -d -n -o NAME | grep nvme | head -1)
  NVME_PATH="/dev/$NVME_DEV"

  # Only partition/format when no first partition exists yet, so re-running
  # the script never wipes an already-provisioned disk.
  if ! lsblk "${NVME_PATH}" | grep -q "${NVME_DEV}p1"; then
    echo " [+] Partitioning NVMe at ${NVME_PATH}..."
    parted "${NVME_PATH}" --script mklabel gpt
    parted "${NVME_PATH}" --script mkpart primary ext4 0% 100%
    # The kernel may not expose the new partition node immediately after
    # parted returns; re-read the table and wait for udev before mkfs.
    partprobe "${NVME_PATH}" || true
    udevadm settle
    mkfs.ext4 -F "${NVME_PATH}p1"
  fi

  mkdir -p /mnt/nvme
  # Add the fstab entry once; mount by UUID so device renumbering is harmless.
  if ! grep -q "/mnt/nvme" /etc/fstab; then
    NVME_UUID=$(blkid -s UUID -o value "${NVME_PATH}p1")
    echo "UUID=${NVME_UUID} /mnt/nvme ext4 defaults,noatime 0 2" >> /etc/fstab
  fi
  mount -a

  # Create saltybot directories on NVMe
  mkdir -p /mnt/nvme/{saltybot,docker,rosbags,slam-maps}
  mkdir -p /mnt/nvme/saltybot/maps
  # /mnt/nvme/docker intentionally stays root-owned (Docker data-root).
  # Guard ${SUDO_USER:-} so `set -u` doesn't abort when run as root directly.
  if [[ -n "${SUDO_USER:-}" ]]; then
    chown -R "$SUDO_USER":"$SUDO_USER" /mnt/nvme/saltybot /mnt/nvme/rosbags /mnt/nvme/slam-maps
  fi
  echo " [+] NVMe mounted at /mnt/nvme"
else
  echo " [!] No NVMe detected. Skipping NVMe setup."
  echo " Install an M.2 NVMe SSD in the Key M slot for best performance."
fi
# ── Install udev rules ────────────────────────────────────────────────────────
# Stable /dev symlinks and permissive modes for saltybot peripherals so the
# ROS2 containers can open them without extra privileges. The quoted 'EOF'
# delimiter keeps the rules file free of shell expansion.
echo "[+] Installing udev rules..."
cat > /etc/udev/rules.d/99-saltybot.rules << 'EOF'
# RPLIDAR A1M8 (SiliconLabs CP2102)
KERNEL=="ttyUSB*", ATTRS{idVendor}=="10c4", ATTRS{idProduct}=="ea60", \
SYMLINK+="rplidar", MODE="0666"

# STM32 USB CDC (STMicroelectronics Virtual COM)
KERNEL=="ttyACM*", ATTRS{idVendor}=="0483", ATTRS{idProduct}=="5740", \
SYMLINK+="stm32-bridge", MODE="0666"

# Intel RealSense D435i
SUBSYSTEM=="usb", ATTRS{idVendor}=="8086", ATTRS{idProduct}=="0b3a", \
MODE="0666"

# IMX219 CSI cameras via V4L2
KERNEL=="video[0246]", SUBSYSTEM=="video4linux", MODE="0666", GROUP="video"
EOF

# Apply the new rules to already-connected devices.
udevadm control --reload-rules
udevadm trigger
# ── Install RealSense udev rules ──────────────────────────────────────────────
echo "[+] Installing RealSense udev rules..."
if [ ! -f /etc/udev/rules.d/99-realsense-libusb.rules ]; then
  # Download to a temp file first: `wget -O <dest>` creates <dest> even when
  # the download fails, which would leave an empty rules file behind and make
  # every future run skip this step. Only move it into place on success.
  tmp_rules=$(mktemp)
  if wget -q -O "$tmp_rules" \
      https://raw.githubusercontent.com/IntelRealSense/librealsense/master/config/99-realsense-libusb.rules; then
    mv "$tmp_rules" /etc/udev/rules.d/99-realsense-libusb.rules
    udevadm control --reload-rules
    udevadm trigger
  else
    rm -f "$tmp_rules"
    echo " [!] Failed to download RealSense udev rules — install them manually later." >&2
  fi
fi
# ── Enable I2C + UART ─────────────────────────────────────────────────────────
echo "[+] Enabling I2C and UART..."
modprobe i2c-dev
# modprobe only lasts until reboot — persist the module via modules-load.d.
echo "i2c-dev" > /etc/modules-load.d/saltybot-i2c.conf
# Add user to required groups (i2c-7 on Orin Nano). Guard ${SUDO_USER:-} so
# `set -u` doesn't abort when the script is run as root directly.
if [[ -n "${SUDO_USER:-}" ]]; then
  usermod -aG i2c,dialout,gpio,video "$SUDO_USER"
fi
# ── Configure UART (disable console on ttyTHS0) ───────────────────────────────
# ttyTHS0 is the 40-pin header UART on Orin — disable serial console to free it
extlinux_conf=/boot/extlinux/extlinux.conf
if ! grep -q "console=ttyTCU0" "$extlinux_conf" 2>/dev/null; then
  echo "[!] Check /boot/extlinux/extlinux.conf — ensure ttyTHS0 is not used"
  echo " as a serial console if you need it for STM32 UART fallback."
else
  echo "[i] Serial console is on ttyTCU0 (debug UART) — ttyTHS0 is free."
fi
# ── Check CSI camera drivers ──────────────────────────────────────────────────
echo "[+] Checking CSI camera drivers..."
if modprobe imx219 2>/dev/null; then
  echo " [+] IMX219 driver loaded."
else
  echo " [!] IMX219 driver not available — may need JetPack camera driver package."
  echo " Install: sudo apt-get install nvidia-jetpack"
fi

# Ensure v4l-utils is present, then always enumerate cameras. (Previously the
# first run installed the package but skipped the device listing entirely.)
if ! command -v v4l2-ctl &>/dev/null; then
  apt-get install -y v4l-utils
fi
echo " [i] V4L2 devices:"
v4l2-ctl --list-devices 2>/dev/null || echo " (no cameras detected yet)"
# ── Docker Compose ────────────────────────────────────────────────────────────
# Install the compose v2 plugin unless either the legacy standalone binary or
# the plugin is already usable.
compose_present=0
if command -v docker-compose &>/dev/null; then
  compose_present=1
elif docker compose version &>/dev/null; then
  compose_present=1
fi
if [[ "$compose_present" -eq 0 ]]; then
  echo "[+] Installing docker-compose plugin..."
  apt-get install -y docker-compose-plugin
fi
# ── Swap (prefer NVMe if available) ──────────────────────────────────────────
# Create swap only when none is active: `swapon --show` prints a header plus
# one line per swap device, so <=1 line means no swap is configured.
if [ "$(swapon --show | wc -l)" -le 1 ]; then
  # Use mountpoint(1) rather than a directory test: /mnt/nvme exists as a
  # plain directory even when no SSD is present/mounted, which would silently
  # place the "NVMe" swapfile on the eMMC root filesystem.
  if mountpoint -q /mnt/nvme; then
    echo "[+] Creating 8GB swap on NVMe..."
    fallocate -l 8G /mnt/nvme/swapfile
    chmod 600 /mnt/nvme/swapfile
    mkswap /mnt/nvme/swapfile
    swapon /mnt/nvme/swapfile
    # Avoid duplicate fstab entries if the script is re-run.
    grep -q '^/mnt/nvme/swapfile' /etc/fstab || \
      echo '/mnt/nvme/swapfile none swap sw 0 0' >> /etc/fstab
  else
    echo "[+] Creating 4GB swap file on eMMC..."
    fallocate -l 4G /swapfile
    chmod 600 /swapfile
    mkswap /swapfile
    swapon /swapfile
    grep -q '^/swapfile' /etc/fstab || \
      echo '/swapfile none swap sw 0 0' >> /etc/fstab
  fi
fi
# ── Install jtop for power monitoring ────────────────────────────────────────
if ! command -v jtop &>/dev/null; then
  echo "[+] Installing jetson-stats (jtop)..."
  # pip3 is not guaranteed on a fresh JetPack image — bootstrap it first.
  command -v pip3 &>/dev/null || apt-get install -y python3-pip
  pip3 install jetson-stats
fi
# Final operator instructions, printed once all setup steps have run.
# Quoted 'EOF' delimiter: the text is emitted literally, byte-for-byte the
# same output the previous echo sequence produced.
cat <<'EOF'

=== Setup complete ===
Please log out and back in for group membership to take effect.

Next steps:
 1. Install Tailscale VPN for Headscale:
 sudo bash scripts/setup-tailscale.sh
 2. Configure auth key:
 sudo bash scripts/headscale-auth-helper.sh generate
 3. Install systemd services:
 sudo bash systemd/install_systemd.sh
 4. Build and start Docker services:
 cd jetson/
 docker compose build
 docker compose up -d
 docker compose logs -f

Monitor power: sudo jtop
Check cameras: v4l2-ctl --list-devices
Check VPN status: sudo tailscale status
EOF