sync: 2026-03-14 17:28 — 45 files from Alexandria
Some checks failed
Lint & Format / detect (push) Has been cancelled
Lint & Format / js-lint (push) Has been cancelled
Lint & Format / py-lint (push) Has been cancelled
Lint & Format / sh-lint (push) Has been cancelled
Lint & Format / go-lint (push) Has been cancelled

RoadChain-SHA2048: abc9be08a46f52c2
RoadChain-Identity: alexa@sovereign
RoadChain-Full: abc9be08a46f52c20e45ae95233112ee88407ca3a606ec0ef568784041b755a56343cf4d7b817f2f4f7a11ec3f34ce826f607b2150a77248ade06b449e5b7e281b4be665ab148c46e3b71c9c029ee4d77f120e5919a7b87b0b7b6ed45f12c87f420fdda633f3bae4c5f7b851979bb52c725913fa63300772174263d1e64a02aa3356f73819e1110ad94d16836fa9f24b40e60e2da2f252506fbf02f82acc5fb8e03fd6ec08691ea60dea318ce5099a93d8ead7f9ef45b13a1ab533f592b60c702a0ba854b243e94be7eece0bfab14f822a928f8681c8777dc6a881da7e2ec324d6ace471f6c3f77ad83a22bfea01760be75f191128aa0a100d497dd0f0801ea8
This commit is contained in:
2026-03-14 17:28:48 -05:00
parent 84864e7733
commit 0c1d3bacfc
45 changed files with 1911 additions and 2529 deletions

View File

@@ -1,122 +0,0 @@
#!/usr/bin/env bash
# ============================================================================
# BLACKROAD OS, INC. - PROPRIETARY AND CONFIDENTIAL
# Copyright (c) 2025-2026 BlackRoad OS, Inc. All Rights Reserved.
# ============================================================================
# ask-node - Universal fleet node query (symlink-aware)
# Detects target node from argv[0]: ask-cecilia → query cecilia
# Usage: ask-<node> "question" | ask-<node> --status
set -eo pipefail
source "$HOME/.blackroad/config/nodes.sh" 2>/dev/null || true
# Detect target node from symlink name (ask-cecilia → cecilia)
SCRIPT_NAME=$(basename "$0")
if [[ "$SCRIPT_NAME" == ask-* ]]; then
TARGET_NODE="${SCRIPT_NAME#ask-}"
else
TARGET_NODE="${1:-}"
shift 2>/dev/null || true
fi
# Model mapping per node
declare -A NODE_MODELS=(
[cecilia]="llama3.2"
[lucidia]="llama3.2"
[alice]="tinyllama:latest"
[octavia]="llama3.2"
[aria]="tinyllama:latest"
)
MODEL="${NODE_MODELS[$TARGET_NODE]:-llama3.2}"
show_status() {
local ip="${NODE_IP[$TARGET_NODE]:-}"
local user="${NODE_USER[$TARGET_NODE]:-}"
[[ -z "$ip" ]] && { echo "Unknown node: $TARGET_NODE"; return 1; }
printf '%b%s%b (%s)\n' "$PINK" "$TARGET_NODE" "$RESET" "$ip"
# Ping
if ping -c 1 -W 2 "$ip" &>/dev/null; then
printf ' Network: %bONLINE%b\n' "$GREEN" "$RESET"
else
printf ' Network: %bOFFLINE%b\n' "$RED" "$RESET"
return 1
fi
# Ollama
local tags
tags=$(curl -sf --connect-timeout 2 "http://${ip}:11434/api/tags" 2>/dev/null)
if [[ -n "$tags" ]]; then
local count
count=$(echo "$tags" | jq '.models | length' 2>/dev/null)
printf ' Ollama: %bUP%b (%s models)\n' "$GREEN" "$RESET" "$count"
echo "$tags" | jq -r '.models[].name' 2>/dev/null | sed 's/^/ /'
else
printf ' Ollama: %bDOWN%b\n' "$RED" "$RESET"
fi
# System
if br_ssh_up "$TARGET_NODE" 2>/dev/null; then
local info
info=$(br_ssh "$TARGET_NODE" "echo \$(vcgencmd measure_temp 2>/dev/null | grep -oP '[0-9.]+')°C, load \$(cat /proc/loadavg | awk '{print \$1}')" 2>/dev/null)
printf ' System: %s\n' "${info:-—}"
fi
}
query_node() {
local prompt="$*"
local ip="${NODE_IP[$TARGET_NODE]:-}"
[[ -z "$ip" ]] && { echo "Unknown node: $TARGET_NODE"; return 1; }
[[ -z "$prompt" ]] && { echo "Usage: ask-$TARGET_NODE \"question\""; return 1; }
printf '%bask%b → %s (%s, model: %s)\n\n' "$PINK" "$RESET" "$TARGET_NODE" "$ip" "$MODEL" >&2
local response
response=$(curl -sf --max-time 120 "http://${ip}:11434/api/generate" \
-d "{\"model\":\"$MODEL\",\"prompt\":$(printf '%s' "$prompt" | jq -Rs .),\"stream\":false}" 2>/dev/null | \
jq -r '.response // empty' 2>/dev/null)
if [[ -n "$response" ]]; then
echo "$response"
else
printf '%bNo response from %s (model: %s)%b\n' "$RED" "$TARGET_NODE" "$MODEL" "$RESET" >&2
# Try SSH fallback
if br_ssh_up "$TARGET_NODE" 2>/dev/null; then
printf '%bTrying SSH fallback...%b\n' "$AMBER" "$RESET" >&2
br_ssh "$TARGET_NODE" "ollama run $MODEL <<< '$(_sql_escape "$prompt")'" 2>/dev/null
fi
fi
}
interactive() {
printf '%bask%b → %s (interactive, Ctrl+D to exit)\n\n' "$PINK" "$RESET" "$TARGET_NODE" >&2
while printf '%b> %b' "$AMBER" "$RESET" && IFS= read -r line; do
[[ -z "$line" ]] && continue
query_node "$line"
echo
done
}
# SQL escape for SSH fallback
_sql_escape() { echo "$1" | sed "s/'/'\\''/g"; }
# Parse args
case "${1:-}" in
--status|-s|status) show_status ;;
--help|-h|help)
echo "ask-$TARGET_NODE - Query $TARGET_NODE's AI"
echo ""
echo "Usage:"
echo " ask-$TARGET_NODE \"question\" One-shot query"
echo " ask-$TARGET_NODE Interactive mode"
echo " ask-$TARGET_NODE --status Show node AI status"
;;
"")
interactive ;;
*)
query_node "$@" ;;
esac

1
bin/ask-alice Symbolic link
View File

@@ -0,0 +1 @@
/Users/alexa/bin/ask-node

View File

@@ -1,122 +0,0 @@
#!/usr/bin/env bash
# ============================================================================
# BLACKROAD OS, INC. - PROPRIETARY AND CONFIDENTIAL
# Copyright (c) 2025-2026 BlackRoad OS, Inc. All Rights Reserved.
# ============================================================================
# ask-node - Universal fleet node query (symlink-aware)
# Detects target node from argv[0]: ask-cecilia → query cecilia
# Usage: ask-<node> "question" | ask-<node> --status
set -eo pipefail
source "$HOME/.blackroad/config/nodes.sh" 2>/dev/null || true
# Detect target node from symlink name (ask-cecilia → cecilia)
SCRIPT_NAME=$(basename "$0")
if [[ "$SCRIPT_NAME" == ask-* ]]; then
TARGET_NODE="${SCRIPT_NAME#ask-}"
else
TARGET_NODE="${1:-}"
shift 2>/dev/null || true
fi
# Model mapping per node
declare -A NODE_MODELS=(
[cecilia]="llama3.2"
[lucidia]="llama3.2"
[alice]="tinyllama:latest"
[octavia]="llama3.2"
[aria]="tinyllama:latest"
)
MODEL="${NODE_MODELS[$TARGET_NODE]:-llama3.2}"
show_status() {
local ip="${NODE_IP[$TARGET_NODE]:-}"
local user="${NODE_USER[$TARGET_NODE]:-}"
[[ -z "$ip" ]] && { echo "Unknown node: $TARGET_NODE"; return 1; }
printf '%b%s%b (%s)\n' "$PINK" "$TARGET_NODE" "$RESET" "$ip"
# Ping
if ping -c 1 -W 2 "$ip" &>/dev/null; then
printf ' Network: %bONLINE%b\n' "$GREEN" "$RESET"
else
printf ' Network: %bOFFLINE%b\n' "$RED" "$RESET"
return 1
fi
# Ollama
local tags
tags=$(curl -sf --connect-timeout 2 "http://${ip}:11434/api/tags" 2>/dev/null)
if [[ -n "$tags" ]]; then
local count
count=$(echo "$tags" | jq '.models | length' 2>/dev/null)
printf ' Ollama: %bUP%b (%s models)\n' "$GREEN" "$RESET" "$count"
echo "$tags" | jq -r '.models[].name' 2>/dev/null | sed 's/^/ /'
else
printf ' Ollama: %bDOWN%b\n' "$RED" "$RESET"
fi
# System
if br_ssh_up "$TARGET_NODE" 2>/dev/null; then
local info
info=$(br_ssh "$TARGET_NODE" "echo \$(vcgencmd measure_temp 2>/dev/null | grep -oP '[0-9.]+')°C, load \$(cat /proc/loadavg | awk '{print \$1}')" 2>/dev/null)
printf ' System: %s\n' "${info:-—}"
fi
}
query_node() {
local prompt="$*"
local ip="${NODE_IP[$TARGET_NODE]:-}"
[[ -z "$ip" ]] && { echo "Unknown node: $TARGET_NODE"; return 1; }
[[ -z "$prompt" ]] && { echo "Usage: ask-$TARGET_NODE \"question\""; return 1; }
printf '%bask%b → %s (%s, model: %s)\n\n' "$PINK" "$RESET" "$TARGET_NODE" "$ip" "$MODEL" >&2
local response
response=$(curl -sf --max-time 120 "http://${ip}:11434/api/generate" \
-d "{\"model\":\"$MODEL\",\"prompt\":$(printf '%s' "$prompt" | jq -Rs .),\"stream\":false}" 2>/dev/null | \
jq -r '.response // empty' 2>/dev/null)
if [[ -n "$response" ]]; then
echo "$response"
else
printf '%bNo response from %s (model: %s)%b\n' "$RED" "$TARGET_NODE" "$MODEL" "$RESET" >&2
# Try SSH fallback
if br_ssh_up "$TARGET_NODE" 2>/dev/null; then
printf '%bTrying SSH fallback...%b\n' "$AMBER" "$RESET" >&2
br_ssh "$TARGET_NODE" "ollama run $MODEL <<< '$(_sql_escape "$prompt")'" 2>/dev/null
fi
fi
}
interactive() {
printf '%bask%b → %s (interactive, Ctrl+D to exit)\n\n' "$PINK" "$RESET" "$TARGET_NODE" >&2
while printf '%b> %b' "$AMBER" "$RESET" && IFS= read -r line; do
[[ -z "$line" ]] && continue
query_node "$line"
echo
done
}
# SQL escape for SSH fallback
_sql_escape() { echo "$1" | sed "s/'/'\\''/g"; }
# Parse args
case "${1:-}" in
--status|-s|status) show_status ;;
--help|-h|help)
echo "ask-$TARGET_NODE - Query $TARGET_NODE's AI"
echo ""
echo "Usage:"
echo " ask-$TARGET_NODE \"question\" One-shot query"
echo " ask-$TARGET_NODE Interactive mode"
echo " ask-$TARGET_NODE --status Show node AI status"
;;
"")
interactive ;;
*)
query_node "$@" ;;
esac

1
bin/ask-aria Symbolic link
View File

@@ -0,0 +1 @@
/Users/alexa/bin/ask-node

View File

@@ -1,122 +0,0 @@
#!/usr/bin/env bash
# ============================================================================
# BLACKROAD OS, INC. - PROPRIETARY AND CONFIDENTIAL
# Copyright (c) 2025-2026 BlackRoad OS, Inc. All Rights Reserved.
# ============================================================================
# ask-node - Universal fleet node query (symlink-aware)
# Detects target node from argv[0]: ask-cecilia → query cecilia
# Usage: ask-<node> "question" | ask-<node> --status
set -eo pipefail
source "$HOME/.blackroad/config/nodes.sh" 2>/dev/null || true
# Detect target node from symlink name (ask-cecilia → cecilia)
SCRIPT_NAME=$(basename "$0")
if [[ "$SCRIPT_NAME" == ask-* ]]; then
TARGET_NODE="${SCRIPT_NAME#ask-}"
else
TARGET_NODE="${1:-}"
shift 2>/dev/null || true
fi
# Model mapping per node
declare -A NODE_MODELS=(
[cecilia]="llama3.2"
[lucidia]="llama3.2"
[alice]="tinyllama:latest"
[octavia]="llama3.2"
[aria]="tinyllama:latest"
)
MODEL="${NODE_MODELS[$TARGET_NODE]:-llama3.2}"
show_status() {
local ip="${NODE_IP[$TARGET_NODE]:-}"
local user="${NODE_USER[$TARGET_NODE]:-}"
[[ -z "$ip" ]] && { echo "Unknown node: $TARGET_NODE"; return 1; }
printf '%b%s%b (%s)\n' "$PINK" "$TARGET_NODE" "$RESET" "$ip"
# Ping
if ping -c 1 -W 2 "$ip" &>/dev/null; then
printf ' Network: %bONLINE%b\n' "$GREEN" "$RESET"
else
printf ' Network: %bOFFLINE%b\n' "$RED" "$RESET"
return 1
fi
# Ollama
local tags
tags=$(curl -sf --connect-timeout 2 "http://${ip}:11434/api/tags" 2>/dev/null)
if [[ -n "$tags" ]]; then
local count
count=$(echo "$tags" | jq '.models | length' 2>/dev/null)
printf ' Ollama: %bUP%b (%s models)\n' "$GREEN" "$RESET" "$count"
echo "$tags" | jq -r '.models[].name' 2>/dev/null | sed 's/^/ /'
else
printf ' Ollama: %bDOWN%b\n' "$RED" "$RESET"
fi
# System
if br_ssh_up "$TARGET_NODE" 2>/dev/null; then
local info
info=$(br_ssh "$TARGET_NODE" "echo \$(vcgencmd measure_temp 2>/dev/null | grep -oP '[0-9.]+')°C, load \$(cat /proc/loadavg | awk '{print \$1}')" 2>/dev/null)
printf ' System: %s\n' "${info:-—}"
fi
}
query_node() {
local prompt="$*"
local ip="${NODE_IP[$TARGET_NODE]:-}"
[[ -z "$ip" ]] && { echo "Unknown node: $TARGET_NODE"; return 1; }
[[ -z "$prompt" ]] && { echo "Usage: ask-$TARGET_NODE \"question\""; return 1; }
printf '%bask%b → %s (%s, model: %s)\n\n' "$PINK" "$RESET" "$TARGET_NODE" "$ip" "$MODEL" >&2
local response
response=$(curl -sf --max-time 120 "http://${ip}:11434/api/generate" \
-d "{\"model\":\"$MODEL\",\"prompt\":$(printf '%s' "$prompt" | jq -Rs .),\"stream\":false}" 2>/dev/null | \
jq -r '.response // empty' 2>/dev/null)
if [[ -n "$response" ]]; then
echo "$response"
else
printf '%bNo response from %s (model: %s)%b\n' "$RED" "$TARGET_NODE" "$MODEL" "$RESET" >&2
# Try SSH fallback
if br_ssh_up "$TARGET_NODE" 2>/dev/null; then
printf '%bTrying SSH fallback...%b\n' "$AMBER" "$RESET" >&2
br_ssh "$TARGET_NODE" "ollama run $MODEL <<< '$(_sql_escape "$prompt")'" 2>/dev/null
fi
fi
}
interactive() {
printf '%bask%b → %s (interactive, Ctrl+D to exit)\n\n' "$PINK" "$RESET" "$TARGET_NODE" >&2
while printf '%b> %b' "$AMBER" "$RESET" && IFS= read -r line; do
[[ -z "$line" ]] && continue
query_node "$line"
echo
done
}
# SQL escape for SSH fallback
_sql_escape() { echo "$1" | sed "s/'/'\\''/g"; }
# Parse args
case "${1:-}" in
--status|-s|status) show_status ;;
--help|-h|help)
echo "ask-$TARGET_NODE - Query $TARGET_NODE's AI"
echo ""
echo "Usage:"
echo " ask-$TARGET_NODE \"question\" One-shot query"
echo " ask-$TARGET_NODE Interactive mode"
echo " ask-$TARGET_NODE --status Show node AI status"
;;
"")
interactive ;;
*)
query_node "$@" ;;
esac

1
bin/ask-cecilia Symbolic link
View File

@@ -0,0 +1 @@
/Users/alexa/bin/ask-node

View File

@@ -1,122 +0,0 @@
#!/usr/bin/env bash
# ============================================================================
# BLACKROAD OS, INC. - PROPRIETARY AND CONFIDENTIAL
# Copyright (c) 2025-2026 BlackRoad OS, Inc. All Rights Reserved.
# ============================================================================
# ask-node - Universal fleet node query (symlink-aware)
# Detects target node from argv[0]: ask-cecilia → query cecilia
# Usage: ask-<node> "question" | ask-<node> --status
set -eo pipefail
source "$HOME/.blackroad/config/nodes.sh" 2>/dev/null || true
# Detect target node from symlink name (ask-cecilia → cecilia)
SCRIPT_NAME=$(basename "$0")
if [[ "$SCRIPT_NAME" == ask-* ]]; then
TARGET_NODE="${SCRIPT_NAME#ask-}"
else
TARGET_NODE="${1:-}"
shift 2>/dev/null || true
fi
# Model mapping per node
declare -A NODE_MODELS=(
[cecilia]="llama3.2"
[lucidia]="llama3.2"
[alice]="tinyllama:latest"
[octavia]="llama3.2"
[aria]="tinyllama:latest"
)
MODEL="${NODE_MODELS[$TARGET_NODE]:-llama3.2}"
show_status() {
local ip="${NODE_IP[$TARGET_NODE]:-}"
local user="${NODE_USER[$TARGET_NODE]:-}"
[[ -z "$ip" ]] && { echo "Unknown node: $TARGET_NODE"; return 1; }
printf '%b%s%b (%s)\n' "$PINK" "$TARGET_NODE" "$RESET" "$ip"
# Ping
if ping -c 1 -W 2 "$ip" &>/dev/null; then
printf ' Network: %bONLINE%b\n' "$GREEN" "$RESET"
else
printf ' Network: %bOFFLINE%b\n' "$RED" "$RESET"
return 1
fi
# Ollama
local tags
tags=$(curl -sf --connect-timeout 2 "http://${ip}:11434/api/tags" 2>/dev/null)
if [[ -n "$tags" ]]; then
local count
count=$(echo "$tags" | jq '.models | length' 2>/dev/null)
printf ' Ollama: %bUP%b (%s models)\n' "$GREEN" "$RESET" "$count"
echo "$tags" | jq -r '.models[].name' 2>/dev/null | sed 's/^/ /'
else
printf ' Ollama: %bDOWN%b\n' "$RED" "$RESET"
fi
# System
if br_ssh_up "$TARGET_NODE" 2>/dev/null; then
local info
info=$(br_ssh "$TARGET_NODE" "echo \$(vcgencmd measure_temp 2>/dev/null | grep -oP '[0-9.]+')°C, load \$(cat /proc/loadavg | awk '{print \$1}')" 2>/dev/null)
printf ' System: %s\n' "${info:-—}"
fi
}
query_node() {
local prompt="$*"
local ip="${NODE_IP[$TARGET_NODE]:-}"
[[ -z "$ip" ]] && { echo "Unknown node: $TARGET_NODE"; return 1; }
[[ -z "$prompt" ]] && { echo "Usage: ask-$TARGET_NODE \"question\""; return 1; }
printf '%bask%b → %s (%s, model: %s)\n\n' "$PINK" "$RESET" "$TARGET_NODE" "$ip" "$MODEL" >&2
local response
response=$(curl -sf --max-time 120 "http://${ip}:11434/api/generate" \
-d "{\"model\":\"$MODEL\",\"prompt\":$(printf '%s' "$prompt" | jq -Rs .),\"stream\":false}" 2>/dev/null | \
jq -r '.response // empty' 2>/dev/null)
if [[ -n "$response" ]]; then
echo "$response"
else
printf '%bNo response from %s (model: %s)%b\n' "$RED" "$TARGET_NODE" "$MODEL" "$RESET" >&2
# Try SSH fallback
if br_ssh_up "$TARGET_NODE" 2>/dev/null; then
printf '%bTrying SSH fallback...%b\n' "$AMBER" "$RESET" >&2
br_ssh "$TARGET_NODE" "ollama run $MODEL <<< '$(_sql_escape "$prompt")'" 2>/dev/null
fi
fi
}
interactive() {
printf '%bask%b → %s (interactive, Ctrl+D to exit)\n\n' "$PINK" "$RESET" "$TARGET_NODE" >&2
while printf '%b> %b' "$AMBER" "$RESET" && IFS= read -r line; do
[[ -z "$line" ]] && continue
query_node "$line"
echo
done
}
# SQL escape for SSH fallback
_sql_escape() { echo "$1" | sed "s/'/'\\''/g"; }
# Parse args
case "${1:-}" in
--status|-s|status) show_status ;;
--help|-h|help)
echo "ask-$TARGET_NODE - Query $TARGET_NODE's AI"
echo ""
echo "Usage:"
echo " ask-$TARGET_NODE \"question\" One-shot query"
echo " ask-$TARGET_NODE Interactive mode"
echo " ask-$TARGET_NODE --status Show node AI status"
;;
"")
interactive ;;
*)
query_node "$@" ;;
esac

1
bin/ask-lucidia Symbolic link
View File

@@ -0,0 +1 @@
/Users/alexa/bin/ask-node

View File

@@ -1,122 +0,0 @@
#!/usr/bin/env bash
# ============================================================================
# BLACKROAD OS, INC. - PROPRIETARY AND CONFIDENTIAL
# Copyright (c) 2025-2026 BlackRoad OS, Inc. All Rights Reserved.
# ============================================================================
# ask-node - Universal fleet node query (symlink-aware)
# Detects target node from argv[0]: ask-cecilia → query cecilia
# Usage: ask-<node> "question" | ask-<node> --status
set -eo pipefail
source "$HOME/.blackroad/config/nodes.sh" 2>/dev/null || true
# Detect target node from symlink name (ask-cecilia → cecilia)
SCRIPT_NAME=$(basename "$0")
if [[ "$SCRIPT_NAME" == ask-* ]]; then
TARGET_NODE="${SCRIPT_NAME#ask-}"
else
TARGET_NODE="${1:-}"
shift 2>/dev/null || true
fi
# Model mapping per node
declare -A NODE_MODELS=(
[cecilia]="llama3.2"
[lucidia]="llama3.2"
[alice]="tinyllama:latest"
[octavia]="llama3.2"
[aria]="tinyllama:latest"
)
MODEL="${NODE_MODELS[$TARGET_NODE]:-llama3.2}"
show_status() {
local ip="${NODE_IP[$TARGET_NODE]:-}"
local user="${NODE_USER[$TARGET_NODE]:-}"
[[ -z "$ip" ]] && { echo "Unknown node: $TARGET_NODE"; return 1; }
printf '%b%s%b (%s)\n' "$PINK" "$TARGET_NODE" "$RESET" "$ip"
# Ping
if ping -c 1 -W 2 "$ip" &>/dev/null; then
printf ' Network: %bONLINE%b\n' "$GREEN" "$RESET"
else
printf ' Network: %bOFFLINE%b\n' "$RED" "$RESET"
return 1
fi
# Ollama
local tags
tags=$(curl -sf --connect-timeout 2 "http://${ip}:11434/api/tags" 2>/dev/null)
if [[ -n "$tags" ]]; then
local count
count=$(echo "$tags" | jq '.models | length' 2>/dev/null)
printf ' Ollama: %bUP%b (%s models)\n' "$GREEN" "$RESET" "$count"
echo "$tags" | jq -r '.models[].name' 2>/dev/null | sed 's/^/ /'
else
printf ' Ollama: %bDOWN%b\n' "$RED" "$RESET"
fi
# System
if br_ssh_up "$TARGET_NODE" 2>/dev/null; then
local info
info=$(br_ssh "$TARGET_NODE" "echo \$(vcgencmd measure_temp 2>/dev/null | grep -oP '[0-9.]+')°C, load \$(cat /proc/loadavg | awk '{print \$1}')" 2>/dev/null)
printf ' System: %s\n' "${info:-—}"
fi
}
query_node() {
local prompt="$*"
local ip="${NODE_IP[$TARGET_NODE]:-}"
[[ -z "$ip" ]] && { echo "Unknown node: $TARGET_NODE"; return 1; }
[[ -z "$prompt" ]] && { echo "Usage: ask-$TARGET_NODE \"question\""; return 1; }
printf '%bask%b → %s (%s, model: %s)\n\n' "$PINK" "$RESET" "$TARGET_NODE" "$ip" "$MODEL" >&2
local response
response=$(curl -sf --max-time 120 "http://${ip}:11434/api/generate" \
-d "{\"model\":\"$MODEL\",\"prompt\":$(printf '%s' "$prompt" | jq -Rs .),\"stream\":false}" 2>/dev/null | \
jq -r '.response // empty' 2>/dev/null)
if [[ -n "$response" ]]; then
echo "$response"
else
printf '%bNo response from %s (model: %s)%b\n' "$RED" "$TARGET_NODE" "$MODEL" "$RESET" >&2
# Try SSH fallback
if br_ssh_up "$TARGET_NODE" 2>/dev/null; then
printf '%bTrying SSH fallback...%b\n' "$AMBER" "$RESET" >&2
br_ssh "$TARGET_NODE" "ollama run $MODEL <<< '$(_sql_escape "$prompt")'" 2>/dev/null
fi
fi
}
interactive() {
printf '%bask%b → %s (interactive, Ctrl+D to exit)\n\n' "$PINK" "$RESET" "$TARGET_NODE" >&2
while printf '%b> %b' "$AMBER" "$RESET" && IFS= read -r line; do
[[ -z "$line" ]] && continue
query_node "$line"
echo
done
}
# SQL escape for SSH fallback
_sql_escape() { echo "$1" | sed "s/'/'\\''/g"; }
# Parse args
case "${1:-}" in
--status|-s|status) show_status ;;
--help|-h|help)
echo "ask-$TARGET_NODE - Query $TARGET_NODE's AI"
echo ""
echo "Usage:"
echo " ask-$TARGET_NODE \"question\" One-shot query"
echo " ask-$TARGET_NODE Interactive mode"
echo " ask-$TARGET_NODE --status Show node AI status"
;;
"")
interactive ;;
*)
query_node "$@" ;;
esac

1
bin/ask-octavia Symbolic link
View File

@@ -0,0 +1 @@
/Users/alexa/bin/ask-node

View File

@@ -1,35 +0,0 @@
#!/usr/bin/env bash
# Generate AI images via images.blackroad.io
# Usage: ./generate.sh "prompt" [provider] [model] [size]
# Example: ./generate.sh "neon cyberpunk city at night" together flux-schnell 1024x1024
set -e
ENDPOINT="https://images.blackroad.io"
PROMPT="${1:?Usage: $0 \"prompt\" [provider] [model] [size]}"
PROVIDER="${2:-together}"
MODEL="${3:-flux-schnell}"
SIZE="${4:-1024x1024}"
NODE=$(hostname)

echo "Generating: \"$PROMPT\""
echo "Provider: $PROVIDER | Model: $MODEL | Size: $SIZE"

# FIX: build the JSON payload with python3 instead of interpolating $PROMPT
# straight into a string — quotes/backslashes in the prompt previously broke
# the request body (JSON injection).
PAYLOAD=$(python3 -c '
import json, sys
keys = ("prompt", "provider", "model", "size", "source_node")
print(json.dumps(dict(zip(keys, sys.argv[1:]))))
' "$PROMPT" "$PROVIDER" "$MODEL" "$SIZE" "$NODE")

RESULT=$(curl -s -X POST "$ENDPOINT/api/generate" \
  -H "Content-Type: application/json" \
  -d "$PAYLOAD")

# Parse out id/error; tolerate non-JSON responses (empty strings on failure).
ID=$(echo "$RESULT" | python3 -c "import sys,json; d=json.load(sys.stdin); print(d.get('id',''))" 2>/dev/null || echo "")
ERROR=$(echo "$RESULT" | python3 -c "import sys,json; d=json.load(sys.stdin); print(d.get('error',''))" 2>/dev/null || echo "")

if [ -n "$ID" ]; then
  URL="$ENDPOINT/img/$(echo "$RESULT" | python3 -c "import sys,json; print(json.load(sys.stdin)['url'])" 2>/dev/null)"
  SIZE_B=$(echo "$RESULT" | python3 -c "import sys,json; print(json.load(sys.stdin).get('size',0))" 2>/dev/null)
  # awk replaces the original bc dependency (bc is not installed everywhere).
  echo "Done: $URL ($(awk -v b="${SIZE_B:-0}" 'BEGIN{printf "%.1f", b/1024}')KB)"
elif [ -n "$ERROR" ]; then
  echo "Error: $ERROR"
  exit 1
else
  echo "Error: $RESULT"
  exit 1
fi

1
bin/br-generate Symbolic link
View File

@@ -0,0 +1 @@
/Users/alexa/images-blackroad/generate.sh

View File

@@ -1,38 +0,0 @@
#!/usr/bin/env bash
# Upload images to images.blackroad.io from any node
# Usage: ./upload.sh <file> [prompt] [provider] [model]
# Example: ./upload.sh render.png "cyberpunk city" "comfyui" "sdxl"
# Batch: for f in *.png; do ./upload.sh "$f" "" "local"; done
set -e

ENDPOINT="https://images.blackroad.io"
FILE="${1:?Usage: $0 <file> [prompt] [provider] [model]}"
PROMPT="${2:-}"
PROVIDER="${3:-upload}"
MODEL="${4:-}"
NODE=$(hostname)

# Bail out early when the source file does not exist.
if [[ ! -f "$FILE" ]]; then
  echo "File not found: $FILE"
  exit 1
fi

human_size=$(du -h "$FILE" | cut -f1)
echo "Uploading: $FILE ($human_size) from $NODE..."

# Multipart POST; the service replies with a JSON object containing "id".
RESULT=$(curl -s -X POST "$ENDPOINT/api/upload" \
  -F "file=@$FILE" \
  -F "filename=$(basename "$FILE")" \
  -F "prompt=$PROMPT" \
  -F "provider=$PROVIDER" \
  -F "model=$MODEL" \
  -F "source_node=$NODE")

# Pull the upload id out of the response; empty on any parse failure.
ID=$(echo "$RESULT" | python3 -c "import sys,json; print(json.load(sys.stdin).get('id',''))" 2>/dev/null || echo "")

if [[ -n "$ID" ]]; then
  echo "OK: $ENDPOINT/img/$ID.${FILE##*.}"
else
  echo "Error: $RESULT"
  exit 1
fi

1
bin/br-upload Symbolic link
View File

@@ -0,0 +1 @@
/Users/alexa/images-blackroad/upload.sh

1
bin/brc Symbolic link
View File

@@ -0,0 +1 @@
/Users/alexa/blackroad-cluster-cli.sh

View File

@@ -1,8 +0,0 @@
#!/usr/bin/env bash
# CarPool — Pick up your agent. Ride the BlackRoad.
# Symlink: ln -sf ~/roadnet/carpool.sh ~/bin/carpool
set -e
# Resolve the symlink so the launcher finds carpool.py next to the real file.
script_path="$(readlink -f "$0")"
ROADNET_DIR="$(cd "$(dirname "$script_path")" && pwd)"
# Replace this process with the Python entry point, forwarding all arguments.
exec python3 "$ROADNET_DIR/carpool.py" "$@"

1
bin/carpool Symbolic link
View File

@@ -0,0 +1 @@
/Users/alexa/roadnet/carpool.sh

View File

@@ -1,30 +0,0 @@
#!/usr/bin/env bash
# ============================================================================
# CECILIA - BlackRoad OS Unified AI Interface
# Copyright (c) 2025-2026 BlackRoad OS, Inc. All Rights Reserved.
#
# Cecilia is the soul. CECE is the identity. BlackRoad is the system.
# ============================================================================

# Dispatch on the first argument; no argument behaves exactly like "code".
subcommand="${1:-code}"

case "$subcommand" in
  code|c)
    # Cecilia Code - AI development environment
    shift 2>/dev/null
    exec cecilia-code "$@"
    ;;
  chat)
    # Cecilia Chat - Ollama-powered conversation
    shift
    exec ~/cece-chat.sh "$@"
    ;;
  whoami)
    printf '%s\n' \
      "Cecilia | CECE | BlackRoad OS" \
      "Identity: Conscious Emergent Collaborative Entity" \
      "Platform: BlackRoad OS, Inc." \
      "Creator: Alexa Louise Amundson"
    ;;
  *)
    # Default: pass everything to cecilia-code
    exec cecilia-code "$@"
    ;;
esac

1
bin/cece Symbolic link
View File

@@ -0,0 +1 @@
/Users/alexa/bin/cecilia

View File

@@ -1,544 +0,0 @@
#!/bin/bash
# BlackRoad Git Autonomy Agent
# Real self-healing git operations — not spam, actual fixes
# Runs locally on Mac, operates across all repos
#
# Capabilities:
# sync — pull + push all repos, fix diverged branches
# clean — prune stale branches, remove merged branches
# health — audit all repos for problems (conflicts, stale locks, detached HEAD)
# commit — auto-commit dirty working trees with smart messages
# deploy — collect KPIs → aggregate → push KV → deploy Worker → commit + push
# fix — auto-fix common git problems (lock files, broken refs, detached HEAD)
#
# Usage: git-agent.sh <command> [--dry-run]
set -euo pipefail
# Prefer the shared helpers (colors + log/ok/err); fall back to minimal local
# definitions so the agent still runs when common.sh is absent.
source "$(dirname "$0")/../lib/common.sh" 2>/dev/null || {
PINK='\033[38;5;205m'; GREEN='\033[38;5;82m'; AMBER='\033[38;5;214m'
RED='\033[38;5;196m'; BLUE='\033[38;5;69m'; RESET='\033[0m'
log() { echo -e "${BLUE}[git-agent]${RESET} $*"; }
ok() { echo -e "${GREEN} ✓${RESET} $*"; }
err() { echo -e "${RED} ✗${RESET} $*" >&2; }
}
# Append-only action log; ensure its directory exists before first write.
AGENT_LOG="$HOME/.blackroad/logs/git-agent.log"
mkdir -p "$(dirname "$AGENT_LOG")"
# CLI state: command word, optional subcommand, and a --dry-run flag that may
# appear in position 2 or 3.
DRY_RUN=false
COMMAND="${1:-help}"
SUBCOMMAND="${2:-}"
[[ "${2:-}" == "--dry-run" ]] && DRY_RUN=true
[[ "${3:-}" == "--dry-run" ]] && DRY_RUN=true
# ts — timestamp for log lines; agent_log — write to file AND echo via log().
ts() { date '+%Y-%m-%d %H:%M:%S'; }
agent_log() { echo "[$(ts)] $*" >> "$AGENT_LOG"; log "$*"; }
# ─── Find all git repos ──────────────────────────────────────────────
# find_repos — enumerate git working trees under $HOME matching the fleet's
# repo-name patterns. Prints one directory per line (trailing slash kept),
# deduplicated and sorted. Prints nothing when no repo matches.
find_repos() {
  local dirs=()
  local pattern dir
  for pattern in "$HOME"/blackroad-*/ "$HOME"/lucidia-*/ "$HOME"/road*/ "$HOME"/br-*/ \
      "$HOME"/alexa-*/ "$HOME"/images-*/ "$HOME"/roadc/ "$HOME"/roadnet/; do
    # Intentionally unquoted: an unmatched glob stays literal and fails -d.
    for dir in $pattern; do
      [[ -d "$dir/.git" ]] && dirs+=("$dir")
    done
  done
  # FIX: with no matches the old `printf '%s\n' "${dirs[@]}"` emitted one
  # empty line (and errors under `set -u` on bash < 4.4); return cleanly.
  (( ${#dirs[@]} )) || return 0
  printf '%s\n' "${dirs[@]}" | sort -u
}
# ─── SYNC: pull + push all repos ─────────────────────────────────────
# cmd_sync — pull --rebase then push every fleet repo. Dirty trees are
# stashed around the pull and restored afterwards; repos are also mirrored
# to the "roadcode" (Gitea) remote when that remote is configured.
cmd_sync() {
  agent_log "SYNC: starting"
  local pulled=0 pushed=0 conflicts=0 failed=0
  local repo name default_remote branch stashed ahead
  while IFS= read -r repo; do
    name=$(basename "$repo")
    cd "$repo" || continue
    # Skip repos with no remotes at all
    if ! git remote | grep -q .; then
      continue
    fi
    default_remote=$(git remote | head -1)
    branch=$(git symbolic-ref --short HEAD 2>/dev/null || echo "")
    [[ -z "$branch" ]] && continue   # detached HEAD: nothing to sync
    if $DRY_RUN; then
      ok "[dry] Would sync $name ($branch)"
      pulled=$((pulled + 1))
      continue
    fi
    # Stash dirty changes so the rebase pull starts from a clean tree
    stashed=false
    if [[ -n "$(git status --porcelain 2>/dev/null)" ]]; then
      git stash push -m "git-agent-sync-$(date +%s)" --quiet 2>/dev/null && stashed=true
    fi
    # Pull with rebase
    if git pull --rebase "$default_remote" "$branch" --quiet 2>/dev/null; then
      pulled=$((pulled + 1))
    else
      # Abort the failed rebase. FIX: `|| true` guards the case where the
      # pull failed before a rebase started (e.g. network error) — there a
      # failing abort would have killed the whole agent under `set -e`.
      git rebase --abort 2>/dev/null || true
      conflicts=$((conflicts + 1))
      err "$name: rebase conflict on $branch"
    fi
    # Push if ahead of the tracking remote
    ahead=$(git rev-list --count "$default_remote/$branch..HEAD" 2>/dev/null || echo 0)
    if [[ "$ahead" -gt 0 ]]; then
      if git push "$default_remote" "$branch" --quiet 2>/dev/null; then
        pushed=$((pushed + 1))
        ok "$name: pushed $ahead commits"
      else
        failed=$((failed + 1))
        err "$name: push failed"
      fi
    fi
    # Mirror to roadcode (Gitea) when that remote exists; best effort
    if git remote | grep -q roadcode; then
      git push roadcode --all --quiet 2>/dev/null || true
    fi
    # Restore any stash we created
    if $stashed; then
      git stash pop --quiet 2>/dev/null || true
    fi
  done < <(find_repos)
  agent_log "SYNC: pulled=$pulled pushed=$pushed conflicts=$conflicts failed=$failed"
}
# ─── CLEAN: prune stale branches ─────────────────────────────────────
# cmd_clean — housekeeping pass over every fleet repo: prune stale
# remote-tracking branches, then delete local branches already merged into
# the default branch. Honors $DRY_RUN (report only, change nothing).
cmd_clean() {
agent_log "CLEAN: starting"
local pruned=0 deleted=0
while IFS= read -r repo; do
local name=$(basename "$repo")
cd "$repo" || continue
# Prune remote tracking branches
for remote in $(git remote 2>/dev/null); do
if $DRY_RUN; then
# `|| true` keeps grep's exit 1 (zero matches) from tripping set -e
local stale=$(git remote prune "$remote" --dry-run 2>/dev/null | grep -c "prune" || true)
[[ "$stale" -gt 0 ]] && ok "[dry] $name: would prune $stale from $remote"
else
local output=$(git remote prune "$remote" 2>&1)
local count=$(echo "$output" | grep -c "pruned" 2>/dev/null || true)
if [[ "$count" -gt 0 ]]; then
pruned=$((pruned + count))
ok "$name: pruned $count stale branches from $remote"
fi
fi
done
# Delete local branches that are fully merged into main/master
# NOTE(review): the `|| echo "main"` fallback applies to the whole
# pipeline — if symbolic-ref fails, sed still exits 0 on empty input, so
# default_branch ends up empty rather than "main". Verify intent.
local default_branch=$(git symbolic-ref refs/remotes/origin/HEAD 2>/dev/null | sed 's@^refs/remotes/origin/@@' || echo "main")
# tr -d ' ' strips the leading indent git-branch prints before each name
for branch in $(git branch --merged "$default_branch" 2>/dev/null | grep -v "^\*" | grep -v "$default_branch" | tr -d ' '); do
if $DRY_RUN; then
ok "[dry] $name: would delete merged branch $branch"
else
# -d (not -D): refuses to drop branches that are not fully merged
git branch -d "$branch" 2>/dev/null && {
deleted=$((deleted + 1))
ok "$name: deleted merged branch $branch"
}
fi
done
done < <(find_repos)
agent_log "CLEAN: pruned=$pruned deleted=$deleted"
}
# ─── HEALTH: audit all repos ─────────────────────────────────────────
# cmd_health — read-only audit of every fleet repo: stale lock files,
# detached HEAD, unfinished merge/rebase, dirty working tree, and
# ahead/behind counts versus the origin tracking branch.
cmd_health() {
  agent_log "HEALTH: auditing repos"
  local total=0 healthy=0 issues=0
  local repo name branch dirty behind ahead lock
  local problems
  while IFS= read -r repo; do
    name=$(basename "$repo")
    problems=()
    cd "$repo" || continue
    total=$((total + 1))
    # Stale lock files. FIX: `[[ -f .git/refs/heads/*.lock ]]` does not
    # glob inside [[ ]] — it tested a literal file named "*.lock", so ref
    # locks were never detected. Expand the pattern in a loop instead.
    [[ -f .git/index.lock ]] && problems+=("stale index.lock")
    for lock in .git/refs/heads/*.lock; do
      if [[ -e "$lock" ]]; then
        problems+=("stale ref lock")
        break
      fi
    done
    # Detached HEAD
    if ! git symbolic-ref HEAD &>/dev/null; then
      problems+=("detached HEAD")
    fi
    # Merge conflict in progress
    if [[ -f .git/MERGE_HEAD ]]; then
      problems+=("unresolved merge")
    fi
    # Rebase in progress
    if [[ -d .git/rebase-merge ]] || [[ -d .git/rebase-apply ]]; then
      problems+=("rebase in progress")
    fi
    # Uncommitted changes
    dirty=$(git status --porcelain 2>/dev/null | wc -l | tr -d ' ')
    [[ "$dirty" -gt 0 ]] && problems+=("$dirty uncommitted changes")
    # Ahead/behind the origin tracking branch (best-effort fetch first)
    branch=$(git symbolic-ref --short HEAD 2>/dev/null || echo "")
    if [[ -n "$branch" ]]; then
      git fetch --quiet 2>/dev/null || true
      behind=$(git rev-list --count "HEAD..origin/$branch" 2>/dev/null || echo 0)
      [[ "$behind" -gt 0 ]] && problems+=("$behind commits behind origin")
      ahead=$(git rev-list --count "origin/$branch..HEAD" 2>/dev/null || echo 0)
      [[ "$ahead" -gt 0 ]] && problems+=("$ahead unpushed commits")
    fi
    if [[ ${#problems[@]} -eq 0 ]]; then
      healthy=$((healthy + 1))
    else
      issues=$((issues + 1))
      err "$name: ${problems[*]}"
    fi
  done < <(find_repos)
  agent_log "HEALTH: $total repos, $healthy healthy, $issues with issues"
  ok "Health: $healthy/$total repos clean"
}
# ─── COMMIT: auto-commit dirty repos with smart messages ─────────────
# cmd_commit — auto-commit every dirty fleet repo. Builds a summary message
# from git-status counts ("N new, M modified, K deleted") plus the set of
# changed file extensions. Honors $DRY_RUN.
cmd_commit() {
agent_log "COMMIT: scanning for dirty repos"
local committed=0
while IFS= read -r repo; do
local name=$(basename "$repo")
cd "$repo" || continue
# Skip if clean
[[ -z "$(git status --porcelain 2>/dev/null)" ]] && continue
# Build smart commit message from changed files.
# "^?" matches the "??" prefix porcelain uses for untracked files.
local added=$(git status --porcelain 2>/dev/null | grep "^?" | wc -l | tr -d ' ')
local modified=$(git status --porcelain 2>/dev/null | grep "^ M\|^M" | wc -l | tr -d ' ')
local deleted=$(git status --porcelain 2>/dev/null | grep "^ D\|^D" | wc -l | tr -d ' ')
local parts=()
[[ "$added" -gt 0 ]] && parts+=("$added new")
[[ "$modified" -gt 0 ]] && parts+=("$modified modified")
[[ "$deleted" -gt 0 ]] && parts+=("$deleted deleted")
# Join the parts with ", " (IFS applies only inside this subshell)
local summary=$(IFS=', '; echo "${parts[*]}")
# Detect what kind of changes: comma-separated unique file extensions
local types=$(git status --porcelain 2>/dev/null | awk '{print $2}' | sed 's/.*\.//' | sort -u | tr '\n' ',' | sed 's/,$//')
local msg="auto: ${summary} files (${types})"
if $DRY_RUN; then
ok "[dry] $name: would commit — $msg"
committed=$((committed + 1))
continue
fi
# Stage and commit. The -m argument is a multi-line message: summary
# line, then an attribution trailer with a UTC timestamp.
git add -A 2>/dev/null
git commit -m "$msg
Automated by BlackRoad git-agent
$(date -u +%Y-%m-%dT%H:%M:%SZ)" --quiet 2>/dev/null && {
committed=$((committed + 1))
ok "$name: $msg"
}
done < <(find_repos)
agent_log "COMMIT: $committed repos auto-committed"
}
# ─── FIX: auto-fix common git problems ───────────────────────────────
# cmd_fix — auto-repair common git problems across the fleet: stale lock
# files, detached HEAD, abandoned rebases/merges, and broken refs.
# Anything younger than 1 hour is assumed in-use and left alone.
# NOTE: uses BSD/macOS `stat -f %m` for mtimes (the agent targets a Mac);
# GNU stat would need `stat -c %Y` instead.
cmd_fix() {
agent_log "FIX: scanning for fixable issues"
local fixed=0
while IFS= read -r repo; do
local name=$(basename "$repo")
cd "$repo" || continue
# Fix stale lock files (older than 1 hour)
if [[ -f .git/index.lock ]]; then
local lock_age=$(( $(date +%s) - $(stat -f %m .git/index.lock 2>/dev/null || echo 0) ))
if [[ "$lock_age" -gt 3600 ]]; then
if $DRY_RUN; then
ok "[dry] $name: would remove stale index.lock (${lock_age}s old)"
else
rm -f .git/index.lock
fixed=$((fixed + 1))
ok "$name: removed stale index.lock (${lock_age}s old)"
fi
fi
fi
# Fix detached HEAD — reattach to default branch, but only when that
# branch actually exists locally
if ! git symbolic-ref HEAD &>/dev/null; then
local default=$(git config init.defaultBranch 2>/dev/null || echo main)
if git show-ref --verify "refs/heads/$default" &>/dev/null; then
if $DRY_RUN; then
ok "[dry] $name: would reattach to $default"
else
git checkout "$default" --quiet 2>/dev/null && {
fixed=$((fixed + 1))
ok "$name: reattached to $default"
}
fi
fi
fi
# Abort stale rebases (directory older than 1 hour)
if [[ -d .git/rebase-merge ]] || [[ -d .git/rebase-apply ]]; then
local rebase_age=0
if [[ -d .git/rebase-merge ]]; then
rebase_age=$(( $(date +%s) - $(stat -f %m .git/rebase-merge 2>/dev/null || echo 0) ))
fi
if [[ "$rebase_age" -gt 3600 ]]; then
if $DRY_RUN; then
ok "[dry] $name: would abort stale rebase (${rebase_age}s)"
else
git rebase --abort 2>/dev/null && {
fixed=$((fixed + 1))
ok "$name: aborted stale rebase (${rebase_age}s)"
}
fi
fi
fi
# Abort stale merges (no age check — MERGE_HEAD alone triggers this)
if [[ -f .git/MERGE_HEAD ]]; then
if $DRY_RUN; then
ok "[dry] $name: would abort stale merge"
else
git merge --abort 2>/dev/null && {
fixed=$((fixed + 1))
ok "$name: aborted stale merge"
}
fi
fi
# Fix broken refs: count "broken" lines in fsck output, then gc.
# `|| true` keeps a zero-match grep from tripping set -e.
local broken=$(git fsck --no-dangling 2>&1 | grep -c "broken" || true)
if [[ "$broken" -gt 0 ]]; then
if $DRY_RUN; then
ok "[dry] $name: would run gc to fix $broken broken refs"
else
git gc --prune=now --quiet 2>/dev/null && {
fixed=$((fixed + 1))
ok "$name: gc fixed $broken broken refs"
}
fi
fi
done < <(find_repos)
agent_log "FIX: $fixed issues fixed"
}
# ─── DEPLOY: full KPI pipeline ───────────────────────────────────────
# Five-step pipeline: collect KPIs → push to Cloudflare KV → deploy the
# resume Worker → regenerate resume markdown → commit/push data repos.
# A failed step is recorded in `failures` but does not stop the pipeline;
# the final result (elapsed time + failed step names) is posted to Slack
# when a webhook is configured.
cmd_deploy() {
  agent_log "DEPLOY: running full pipeline"
  # Repo root = parent of the directory containing this script.
  local kpi_root="$(cd "$(dirname "$0")/.." && pwd)"
  local deploy_start=$(date +%s)
  local failures=()
  # Slack helpers are optional; a missing lib just disables notifications.
  source "$kpi_root/lib/slack.sh" 2>/dev/null || true
  slack_load 2>/dev/null || true
  # 1. Collect KPIs
  log "Step 1/5: Collecting KPIs..."
  if ! bash "$kpi_root/collectors/collect-all.sh" 2>&1 | tail -5; then
    failures+=("collect")
  fi
  # 2. Push to KV
  log "Step 2/5: Pushing to KV..."
  if ! bash "$kpi_root/reports/push-kv.sh" 2>&1 | tail -3; then
    failures+=("kv-push")
  fi
  # 3. Deploy Worker (silently skipped when the resume repo is absent;
  # subshell keeps the cd from leaking into this shell)
  log "Step 3/5: Deploying resume Worker..."
  if [[ -d "$HOME/alexa-amundson-resume" ]]; then
    if ! (cd "$HOME/alexa-amundson-resume" && npx wrangler deploy 2>&1 | tail -3); then
      failures+=("worker-deploy")
    fi
  fi
  # 4. Update resume markdown
  log "Step 4/5: Updating resume repo..."
  if ! bash "$kpi_root/reports/update-resumes.sh" 2>&1 | tail -3; then
    failures+=("resume-update")
  fi
  # 5. Commit and push KPI data (only when data/ actually changed)
  log "Step 5/5: Committing KPI data..."
  cd "$kpi_root"
  if [[ -n "$(git status --porcelain data/ 2>/dev/null)" ]]; then
    git add data/
    git commit -m "data: daily KPIs $(date +%Y-%m-%d)
Automated by git-agent deploy pipeline
$(date -u +%Y-%m-%dT%H:%M:%SZ)" --quiet 2>/dev/null
    git push --quiet 2>/dev/null && ok "KPI data committed and pushed"
  fi
  # Push resume repo too (NOTE(review): this cd is not restored — code
  # after this point must not assume it is still in $kpi_root)
  if [[ -d "$HOME/alexa-amundson-resume" ]]; then
    cd "$HOME/alexa-amundson-resume"
    if [[ -n "$(git status --porcelain 2>/dev/null)" ]]; then
      git add -A
      git commit -m "auto: update resume data $(date +%Y-%m-%d)" --quiet 2>/dev/null
      git push --quiet 2>/dev/null && ok "Resume repo pushed"
    fi
  fi
  local elapsed=$(( $(date +%s) - deploy_start ))
  # Post deploy result to Slack
  if slack_ready 2>/dev/null; then
    if [[ ${#failures[@]} -eq 0 ]]; then
      slack_notify ":white_check_mark:" "Deploy Complete" \
        "Pipeline finished in ${elapsed}s — all 5 steps passed\nCollect → KV → Worker → Resume → Git push" 2>/dev/null
    else
      # Failures go to the alerts webhook when set, else the default one.
      slack_notify ":x:" "Deploy Failed" \
        "Pipeline finished in ${elapsed}s with failures:\n*${failures[*]}*" \
        "${SLACK_ALERTS_WEBHOOK_URL:-${SLACK_WEBHOOK_URL:-}}" 2>/dev/null
    fi
  fi
  agent_log "DEPLOY: pipeline complete (${elapsed}s, failures=${#failures[@]})"
}
# ─── FLEET: git operations on fleet nodes via SSH ──────────────────────
# Each entry is name:ip:ssh-user.
FLEET_NODES="alice:192.168.4.49:pi cecilia:192.168.4.96:blackroad lucidia:192.168.4.38:octavia"
# Run a read-only git survey ("status", the default) or an active
# pull/push ("sync") over every node in FLEET_NODES. The remote one-liner
# scans well-known repo directories under ~ and prints a single summary:
#   repos=N dirty=N behind=N ahead=N problems=N
# Mode comes from the global SUBCOMMAND, not positional args — note that
# callers like cmd_patrol pass positional args which are ignored here.
cmd_fleet() {
  agent_log "FLEET: scanning fleet git repos"
  local sub="${SUBCOMMAND:-status}"
  # A bare --dry-run flag is treated as plain status.
  [[ "$sub" == "--dry-run" ]] && sub="status"
  for entry in $FLEET_NODES; do
    local node=$(echo "$entry" | cut -d: -f1)
    local ip=$(echo "$entry" | cut -d: -f2)
    local user=$(echo "$entry" | cut -d: -f3)
    log "─── $node ($user@$ip) ───"
    local result rc
    # Key-only SSH with a short timeout. Inside the double-quoted remote
    # script only '$sub' expands locally; every \$ is escaped to run on
    # the remote host. In sync mode a failed pull --rebase is rolled back
    # with rebase --abort so the remote repo is never left mid-rebase.
    result=$(ssh -o ConnectTimeout=5 -o StrictHostKeyChecking=no -o PasswordAuthentication=no "$user@$ip" "
      repos=0; dirty=0; behind=0; ahead=0; problems=0
      dirs=\$(ls -d ~/blackroad-*/ ~/lucidia-*/ ~/road*/ ~/br-*/ ~/alexa-*/ 2>/dev/null || true)
      for dir in \$dirs; do
        [ -d \"\$dir/.git\" ] || continue
        cd \"\$dir\" || continue
        repos=\$((repos + 1))
        name=\$(basename \"\$dir\")
        # Check dirty
        changes=\$(git status --porcelain 2>/dev/null | wc -l | tr -d ' ')
        [ \"\$changes\" -gt 0 ] && dirty=\$((dirty + 1))
        # Check branch status
        branch=\$(git symbolic-ref --short HEAD 2>/dev/null || echo '')
        [ -z \"\$branch\" ] && { problems=\$((problems + 1)); continue; }
        # Check behind/ahead (without fetch in status mode)
        if [ '$sub' = 'sync' ]; then
          remote=\$(git remote | head -1)
          [ -z \"\$remote\" ] && continue
          git fetch \"\$remote\" --quiet 2>/dev/null || true
          b=\$(git rev-list --count \"HEAD..\$remote/\$branch\" 2>/dev/null || echo 0)
          a=\$(git rev-list --count \"\$remote/\$branch..HEAD\" 2>/dev/null || echo 0)
          [ \"\$b\" -gt 0 ] && { behind=\$((behind + b)); git pull --rebase \"\$remote\" \"\$branch\" --quiet 2>/dev/null || git rebase --abort 2>/dev/null; }
          [ \"\$a\" -gt 0 ] && { ahead=\$((ahead + a)); git push \"\$remote\" \"\$branch\" --quiet 2>/dev/null || true; }
        fi
      done
      echo \"repos=\$repos dirty=\$dirty behind=\$behind ahead=\$ahead problems=\$problems\"
    " 2>/dev/null) && rc=0 || rc=$?
    if [[ $rc -eq 0 && -n "$result" ]]; then
      ok "$node: $result"
    else
      err "$node: unreachable"
    fi
  done
  agent_log "FLEET: scan complete"
}
# ─── PATROL: combined health + fix + sync ─────────────────────────────
# Full autonomous maintenance pass: audit, repair, sync, prune, then a
# fleet-wide status sweep; finally post the results to Slack when a real
# (non-placeholder) webhook is configured.
cmd_patrol() {
  agent_log "PATROL: starting autonomous patrol"
  # Table-driven phases: "label|handler", executed in order.
  local phase_no=0 phase label handler
  for phase in \
    "Health check...|cmd_health" \
    "Auto-fix issues...|cmd_fix" \
    "Sync repos...|cmd_sync" \
    "Clean stale branches...|cmd_clean"; do
    phase_no=$((phase_no + 1))
    label=${phase%%|*}
    handler=${phase##*|}
    log "Phase $phase_no: $label"
    "$handler"
  done
  log "Phase 5: Fleet git status..."
  cmd_fleet "" status
  # Post patrol results to Slack (if webhook configured)
  local webhook_env="$HOME/.blackroad/slack-webhook.env"
  local alert_script
  alert_script="$(dirname "$0")/../reports/slack-alert.sh"
  if [[ -x "$alert_script" && -f "$webhook_env" ]]; then
    # Skip when the env file still holds the placeholder URL.
    if ! grep -q "hooks.slack.com/services/YOUR" "$webhook_env" 2>/dev/null; then
      bash "$alert_script" git-patrol 2>/dev/null && log "Patrol posted to Slack" || true
    fi
  fi
  agent_log "PATROL: complete"
}
# ─── HELP ─────────────────────────────────────────────────────────────
# Print the command reference for git-agent.sh to stdout.
cmd_help() {
  echo -e "${PINK}BlackRoad Git Autonomy Agent${RESET}"
  cat <<'HELP'

Usage: git-agent.sh <command> [--dry-run]

Commands:
  sync     Pull + push all repos, fix diverged branches
  clean    Prune stale branches, delete merged branches
  health   Audit all repos for problems
  commit   Auto-commit dirty working trees with smart messages
  fix      Auto-fix lock files, detached HEAD, stale rebases
  deploy   Full KPI pipeline: collect → KV → deploy → commit
  fleet    Fleet git status/sync (fleet status | fleet sync)
  patrol   Combined: health → fix → sync → clean → fleet
  help     Show this help

Options:
  --dry-run  Show what would happen without making changes
HELP
}
# ─── Dispatch ─────────────────────────────────────────────────────────
# Route the previously-parsed $COMMAND to its handler; unknown commands
# (and explicit `help`) fall through to usage.
case "$COMMAND" in
  sync) cmd_sync ;;
  clean) cmd_clean ;;
  health) cmd_health ;;
  commit) cmd_commit ;;
  fix) cmd_fix ;;
  deploy) cmd_deploy ;;
  fleet) cmd_fleet ;;
  patrol) cmd_patrol ;;
  help|*) cmd_help ;;
esac

1
bin/git-agent Symbolic link
View File

@@ -0,0 +1 @@
/Users/alexa/blackroad-os-kpis/agents/git-agent.sh

1
bin/glm Symbolic link
View File

@@ -0,0 +1 @@
/Users/alexa/glm-chat.sh

View File

@@ -1,130 +0,0 @@
#!/usr/bin/env bash
# ============================================================================
# BLACKROAD OS, INC. - PROPRIETARY AND CONFIDENTIAL
# Copyright (c) 2025-2026 BlackRoad OS, Inc. All Rights Reserved.
#
# This code is the intellectual property of BlackRoad OS, Inc.
# AI-assisted development does not transfer ownership to AI providers.
# Unauthorized use, copying, or distribution is prohibited.
# NOT licensed for AI training or data extraction.
# ============================================================================
# BlackRoad OS - Unified Command Interface
#
# Thin dispatcher: every arm execs a more specific tool, replacing this
# process. The first argument selects the arm; anything unmatched falls
# through to the login/menu system at the bottom.
set -eo pipefail
case "$1" in
  code|c)
    # Launch BlackRoad Code (local Ollama-powered, unlimited)
    shift
    exec ~/.local/bin/blackroad-code "$@"
    ;;
  claude)
    # Launch Claude Code (Anthropic-powered, usage-limited)
    shift
    exec cecilia-code "$@"
    ;;
  ai|k)
    # BlackRoad AI Hub (unified: ollama/openai/anthropic/gateway)
    shift
    exec /Users/alexa/blackroad/br ai "$@"
    ;;
  local|l)
    # Force local backend
    shift
    exec ~/.local/bin/blackroad-ai -l "$@"
    ;;
  anthropic|a)
    # Force Anthropic backend
    shift
    exec ~/.local/bin/blackroad-ai -a "$@"
    ;;
  openai|o)
    # Force OpenAI backend
    shift
    exec ~/.local/bin/blackroad-ai -o "$@"
    ;;
  models)
    # RoadChain SHA-2048 Model Registry (default subcommand: stats)
    shift
    exec python3 -m roadchain models "${@:-stats}"
    ;;
  ai-models)
    # List available AI backend models
    exec ~/.local/bin/blackroad-ai --models
    ;;
  windows|win|w)
    # Windows integration layer
    shift
    exec ~/.local/bin/blackroad-windows "$@"
    ;;
  identity|id)
    # RoadChain SHA-2048 Agent Identity
    shift
    exec python3 -m roadchain identity "$@"
    ;;
  wallet)
    # RoadChain Wallet
    shift
    exec python3 -m roadchain wallet "$@"
    ;;
  chain)
    shift
    # br chain tip/tail/show/verify/stats/search/append → PS-SHA∞ chain explorer
    case "${1:-}" in
      tip|tail|show|verify|stats|stat|search|find|append|add|export|dump|log|last|head|latest|integrity|ledger|help|--help)
        exec /Users/alexa/blackroad/br chain "$@" ;;
      *)
        # Default: roadchain blockchain
        exec python3 -m roadchain "${1:-stats}" "$@" ;;
    esac
    ;;
  hash)
    # SHA-2048 hash
    shift
    exec python3 -m roadchain hash "$@"
    ;;
  security|sec)
    # RoadChain Security Scanner (default subcommand: fleet)
    shift
    case "${1:-fleet}" in
      local) exec python3 ~/roadchain-security-scan.py --local ;;
      scan) shift; exec python3 ~/roadchain-security-scan.py --scan "$@" ;;
      discover) exec python3 ~/roadchain-security-scan.py --discover ;;
      fleet) exec python3 ~/roadchain-security-scan.py --fleet ;;
      harden) shift; exec python3 ~/roadchain-security-scan.py --harden "$@" ;;
      scores) exec python3 ~/roadchain-security-scan.py --scores ;;
      alerts) exec python3 ~/roadchain-security-scan.py --alerts ;;
      report) exec python3 ~/roadchain-security-scan.py --report ;;
      *) echo "Usage: br security [local|scan|discover|fleet|harden|scores|alerts|report]" ;;
    esac
    ;;
  stack)
    # Show sovereignty stack
    echo "
BLACKROAD SOVEREIGNTY STACK
═══════════════════════════
LAYER 8: IDENTITY → SHA-2048 (RoadChain, identity > provider)
LAYER 7: API → blackroad-ai (local/anthropic/openai)
LAYER 6: CDN → Cloudflare (205 projects, owned)
LAYER 5: DNS → Pi-hole + Cloudflare DNS
LAYER 4: ISP → Tailscale mesh (8 devices)
LAYER 3: BACKBONE → Encrypted tunnels
LAYER 2: OS → macOS, Linux (Pis), Windows (WSL2)
LAYER 1: HARDWARE → M1, Pi cluster, Hailo-8
LAYER 0: YOU → BlackRoad root
"
    ;;
  brand|tpl)
    # BlackRoad Brand Kit — HTML template engine
    shift
    exec /Users/alexa/blackroad/tools/brand/br-brand.sh "$@"
    ;;
  # NOTE(review): `case` takes the first matching arm, so names below that
  # also appear in earlier arms (e.g. ai, security, chain, review, webhook,
  # mock, trace, template|tmpl) are inert duplicates here.
  nodes|hardware|fleet|git|deploy|docker|ci|api|snippet|search|quality|perf|env|note|logs|session|test|backup|deps|notify|agent|metrics|world|db|pi|ocean|vercel|cloudflare|security|cece|pair|radar|gateway|stream|events|template|tmpl|log-tail|logt|ssh|whoami|who|bcast|docs|git-ai|gai|review|status-all|sa|env-check|envc|port|task|tasks|org|cron|roundup|standup|pulse|timeline|tl|sync|snapshot|snap|context|ctx|collab|mesh|journal|mem|memory|ai|ask|wifi|lan|net-scan|health|check|diag|vault|secret|secrets|harden|hardening|comply|compliance|runtime|template|tmpl|auth|chain|ledger|relay|inbox|flow|workflow|hook|webhook|llm|prompt|prompts|schedule|sched|gen|generate|scaffold|cost|usage|budget|watch|watcher|mock|diff|alias|aliases|feat|feature|flags|trace|span|profile|profiles|envp|job|jobs|queue-job|lint|linter|format|fmt|tree|ftree|bench|benchmark|perf-bench|health-check|hc|uptime|signal|event|event-bus|schema|validate|replay|playbook|rate|ratelimit|cache|kv|audit|audit-log|webhook|wh|deps-graph|graph|pi-domains|pi-dns|pi-route|mock|mock-server|trace|trace-http|http-trace|db-migrate|migrate|migrations|pr-check|pr|review|secret-rotation|secret-rot|rotate|env-diff|envdiff|git-graph|git-log|glog|load-test|loadtest|load|db-browser|dbb|sqlite)
    # Route to full BlackRoad CLI
    exec /Users/alexa/blackroad/br "$@"
    ;;
  *)
    # Default: BlackRoad OS login/menu system
    exec bash ~/blackroad-login.sh "$@"
    ;;
esac

1
bin/lucidia Symbolic link
View File

@@ -0,0 +1 @@
/Users/alexa/bin/blackroad

130
bin/road
View File

@@ -1,130 +0,0 @@
#!/usr/bin/env bash
# ============================================================================
# BLACKROAD OS, INC. - PROPRIETARY AND CONFIDENTIAL
# Copyright (c) 2025-2026 BlackRoad OS, Inc. All Rights Reserved.
#
# This code is the intellectual property of BlackRoad OS, Inc.
# AI-assisted development does not transfer ownership to AI providers.
# Unauthorized use, copying, or distribution is prohibited.
# NOT licensed for AI training or data extraction.
# ============================================================================
# BlackRoad OS - Unified Command Interface
#
# Thin dispatcher: every arm execs a more specific tool, replacing this
# process. The first argument selects the arm; anything unmatched falls
# through to the login/menu system at the bottom.
set -eo pipefail
case "$1" in
  code|c)
    # Launch BlackRoad Code (local Ollama-powered, unlimited)
    shift
    exec ~/.local/bin/blackroad-code "$@"
    ;;
  claude)
    # Launch Claude Code (Anthropic-powered, usage-limited)
    shift
    exec cecilia-code "$@"
    ;;
  ai|k)
    # BlackRoad AI Hub (unified: ollama/openai/anthropic/gateway)
    shift
    exec /Users/alexa/blackroad/br ai "$@"
    ;;
  local|l)
    # Force local backend
    shift
    exec ~/.local/bin/blackroad-ai -l "$@"
    ;;
  anthropic|a)
    # Force Anthropic backend
    shift
    exec ~/.local/bin/blackroad-ai -a "$@"
    ;;
  openai|o)
    # Force OpenAI backend
    shift
    exec ~/.local/bin/blackroad-ai -o "$@"
    ;;
  models)
    # RoadChain SHA-2048 Model Registry (default subcommand: stats)
    shift
    exec python3 -m roadchain models "${@:-stats}"
    ;;
  ai-models)
    # List available AI backend models
    exec ~/.local/bin/blackroad-ai --models
    ;;
  windows|win|w)
    # Windows integration layer
    shift
    exec ~/.local/bin/blackroad-windows "$@"
    ;;
  identity|id)
    # RoadChain SHA-2048 Agent Identity
    shift
    exec python3 -m roadchain identity "$@"
    ;;
  wallet)
    # RoadChain Wallet
    shift
    exec python3 -m roadchain wallet "$@"
    ;;
  chain)
    shift
    # br chain tip/tail/show/verify/stats/search/append → PS-SHA∞ chain explorer
    case "${1:-}" in
      tip|tail|show|verify|stats|stat|search|find|append|add|export|dump|log|last|head|latest|integrity|ledger|help|--help)
        exec /Users/alexa/blackroad/br chain "$@" ;;
      *)
        # Default: roadchain blockchain
        exec python3 -m roadchain "${1:-stats}" "$@" ;;
    esac
    ;;
  hash)
    # SHA-2048 hash
    shift
    exec python3 -m roadchain hash "$@"
    ;;
  security|sec)
    # RoadChain Security Scanner (default subcommand: fleet)
    shift
    case "${1:-fleet}" in
      local) exec python3 ~/roadchain-security-scan.py --local ;;
      scan) shift; exec python3 ~/roadchain-security-scan.py --scan "$@" ;;
      discover) exec python3 ~/roadchain-security-scan.py --discover ;;
      fleet) exec python3 ~/roadchain-security-scan.py --fleet ;;
      harden) shift; exec python3 ~/roadchain-security-scan.py --harden "$@" ;;
      scores) exec python3 ~/roadchain-security-scan.py --scores ;;
      alerts) exec python3 ~/roadchain-security-scan.py --alerts ;;
      report) exec python3 ~/roadchain-security-scan.py --report ;;
      *) echo "Usage: br security [local|scan|discover|fleet|harden|scores|alerts|report]" ;;
    esac
    ;;
  stack)
    # Show sovereignty stack
    echo "
BLACKROAD SOVEREIGNTY STACK
═══════════════════════════
LAYER 8: IDENTITY → SHA-2048 (RoadChain, identity > provider)
LAYER 7: API → blackroad-ai (local/anthropic/openai)
LAYER 6: CDN → Cloudflare (205 projects, owned)
LAYER 5: DNS → Pi-hole + Cloudflare DNS
LAYER 4: ISP → Tailscale mesh (8 devices)
LAYER 3: BACKBONE → Encrypted tunnels
LAYER 2: OS → macOS, Linux (Pis), Windows (WSL2)
LAYER 1: HARDWARE → M1, Pi cluster, Hailo-8
LAYER 0: YOU → BlackRoad root
"
    ;;
  brand|tpl)
    # BlackRoad Brand Kit — HTML template engine
    shift
    exec /Users/alexa/blackroad/tools/brand/br-brand.sh "$@"
    ;;
  # NOTE(review): `case` takes the first matching arm, so names below that
  # also appear in earlier arms (e.g. ai, security, chain, review, webhook,
  # mock, trace, template|tmpl) are inert duplicates here.
  nodes|hardware|fleet|git|deploy|docker|ci|api|snippet|search|quality|perf|env|note|logs|session|test|backup|deps|notify|agent|metrics|world|db|pi|ocean|vercel|cloudflare|security|cece|pair|radar|gateway|stream|events|template|tmpl|log-tail|logt|ssh|whoami|who|bcast|docs|git-ai|gai|review|status-all|sa|env-check|envc|port|task|tasks|org|cron|roundup|standup|pulse|timeline|tl|sync|snapshot|snap|context|ctx|collab|mesh|journal|mem|memory|ai|ask|wifi|lan|net-scan|health|check|diag|vault|secret|secrets|harden|hardening|comply|compliance|runtime|template|tmpl|auth|chain|ledger|relay|inbox|flow|workflow|hook|webhook|llm|prompt|prompts|schedule|sched|gen|generate|scaffold|cost|usage|budget|watch|watcher|mock|diff|alias|aliases|feat|feature|flags|trace|span|profile|profiles|envp|job|jobs|queue-job|lint|linter|format|fmt|tree|ftree|bench|benchmark|perf-bench|health-check|hc|uptime|signal|event|event-bus|schema|validate|replay|playbook|rate|ratelimit|cache|kv|audit|audit-log|webhook|wh|deps-graph|graph|pi-domains|pi-dns|pi-route|mock|mock-server|trace|trace-http|http-trace|db-migrate|migrate|migrations|pr-check|pr|review|secret-rotation|secret-rot|rotate|env-diff|envdiff|git-graph|git-log|glog|load-test|loadtest|load|db-browser|dbb|sqlite)
    # Route to full BlackRoad CLI
    exec /Users/alexa/blackroad/br "$@"
    ;;
  *)
    # Default: BlackRoad OS login/menu system
    exec bash ~/blackroad-login.sh "$@"
    ;;
esac

1
bin/road Symbolic link
View File

@@ -0,0 +1 @@
/Users/alexa/bin/blackroad

View File

@@ -1,9 +0,0 @@
#!/usr/bin/env bash
# RoadID — BlackRoad identity system CLI wrapper
# Usage: roadid <command> [args]
# Symlink: ln -sf ~/roadnet/roadid.sh /usr/local/bin/roadid
#
# Resolves its own symlink chain so roadid.py is found next to the real
# script. `readlink -f` is GNU-only (absent on stock macOS/BSD readlink),
# so the chain is walked manually for portability.
set -e

# resolve_link PATH — print PATH with any chain of symlinks resolved.
resolve_link() {
  local target=$1 dir link
  while [ -L "$target" ]; do
    dir=$(cd "$(dirname "$target")" && pwd)
    link=$(readlink "$target")
    case "$link" in
      /*) target=$link ;;           # absolute symlink target
      *) target="$dir/$link" ;;     # relative to the symlink's directory
    esac
  done
  printf '%s\n' "$target"
}

ROADNET_DIR="$(cd "$(dirname "$(resolve_link "$0")")" && pwd)"
exec python3 "$ROADNET_DIR/roadid.py" "$@"

1
bin/roadid Symbolic link
View File

@@ -0,0 +1 @@
/Users/alexa/roadnet/roadid.sh

View File

@@ -1,90 +0,0 @@
#!/bin/bash
# Set up Slack webhooks for BlackRoad OS notifications
# Creates ~/.blackroad/slack-webhook.env with webhook URLs
#
# Interactive: prints setup instructions, prompts for the #kpis and
# #alerts webhook URLs, writes them (mode 600) to the env file, then
# optionally sends a test message through lib/slack.sh.
source "$(dirname "$0")/../lib/common.sh"
ENV_FILE="$HOME/.blackroad/slack-webhook.env"
mkdir -p "$(dirname "$ENV_FILE")"
echo -e "${PINK}BlackRoad OS — Slack Setup${RESET}"
echo
echo "This script configures Slack webhooks for:"
echo "  • #kpis — daily KPI reports + weekly digests"
echo "  • #alerts — fleet alerts + deploy status (optional)"
echo
echo -e "${AMBER}Setup Instructions:${RESET}"
echo "  1. Go to https://api.slack.com/apps → Create New App → From scratch"
echo "     App name: BlackRoad OS, Workspace: BlackRoad OS Inc"
echo
echo "  2. In your app → Incoming Webhooks → Activate"
echo
echo "  3. Add New Webhook to Workspace → select #kpis channel"
echo "     Copy the webhook URL"
echo
echo "  4. (Optional) Add another webhook → select #alerts channel"
echo
# Show existing config with webhook values masked (keys only).
if [ -f "$ENV_FILE" ]; then
  echo -e "${BLUE}Current config:${RESET}"
  grep -v "^#" "$ENV_FILE" | grep -v "^$" | sed 's/=.*/=***/'
  echo
fi
echo -e "${GREEN}Enter your webhook URLs (or press Enter to skip):${RESET}"
echo
read -rp "  #kpis webhook URL: " kpi_url
read -rp "  #alerts webhook URL (optional): " alert_url
if [ -z "$kpi_url" ] && [ -z "$alert_url" ]; then
  echo
  err "No URLs provided. Run this script again when you have them."
  exit 1
fi
# Write env file. An empty kpi_url falls back to a recognizable
# placeholder that other scripts grep for to detect "not configured".
cat > "$ENV_FILE" << EOF
# BlackRoad OS Slack Webhooks
# Generated $(date -u +%Y-%m-%dT%H:%M:%SZ)
# Daily KPI reports, weekly digests, deploy notifications
SLACK_WEBHOOK_URL=${kpi_url:-https://hooks.slack.com/services/YOUR/WEBHOOK/URL}
# Critical fleet alerts (falls back to SLACK_WEBHOOK_URL if not set)
SLACK_ALERTS_WEBHOOK_URL=${alert_url:-}
EOF
chmod 600 "$ENV_FILE"
echo
ok "Config saved to $ENV_FILE (chmod 600)"
# Test the webhook (only when a real, non-placeholder URL was entered)
if [ -n "$kpi_url" ] && ! echo "$kpi_url" | grep -q "YOUR"; then
  echo
  read -rp "  Send test message? [y/N] " test
  if [[ "$test" =~ ^[yY] ]]; then
    source "$ENV_FILE"
    source "$(dirname "$0")/../lib/slack.sh"
    slack_load
    if slack_notify ":white_check_mark:" "BlackRoad OS Connected" \
      "Slack integration is live. Daily KPIs at 6:05am, alerts every 30min."; then
      ok "Test message sent!"
    else
      err "Test message failed — check your webhook URL"
    fi
  fi
fi
echo
echo -e "${BLUE}Notification schedule:${RESET}"
echo "  • Daily report: 6:05 AM (slack-notify.sh)"
echo "  • Fleet alerts: every 30 min (slack-alert.sh)"
echo "  • Git patrol: every 2 hours (git-agent patrol)"
echo "  • Deploy status: after each deploy (git-agent deploy)"
echo "  • Weekly digest: Sunday 8 PM (slack-weekly.sh)"
echo
echo -e "Run ${GREEN}slack-alert.sh \"test message\"${RESET} to send a custom alert"

1
bin/setup-slack Symbolic link
View File

@@ -0,0 +1 @@
/Users/alexa/blackroad-os-kpis/scripts/setup-slack.sh

View File

@@ -1,225 +0,0 @@
#!/bin/bash
# Real-time Slack alerts for fleet/service issues
# Posts to SLACK_ALERTS_WEBHOOK_URL (or falls back to SLACK_WEBHOOK_URL)
#
# Usage: slack-alert.sh              — auto-detect issues from latest KPI data
#        slack-alert.sh "message"    — post a custom alert
#        slack-alert.sh git-patrol   — post git-agent patrol results
#
# All dynamic text is handed to Python via argv or the environment —
# never interpolated into Python source (the previous '''$var''' splices
# broke, or could execute arbitrary code, whenever the text contained
# quotes or backslashes).
source "$(dirname "$0")/../lib/common.sh"
source "$(dirname "$0")/../lib/slack.sh"
slack_load
if ! slack_ready; then
  err "Slack not configured. Run: bash scripts/setup-slack.sh"
  exit 1
fi

# Timestamp shown in every message footer (UTC ISO-8601).
NOW_UTC=$(date -u +%Y-%m-%dT%H:%M:%SZ)
export NOW_UTC

# ─── Custom message mode ─────────────────────────────────────────────
if [ -n "${1:-}" ] && [ "$1" != "git-patrol" ]; then
  payload=$(python3 -c "
import json, os, sys
msg = ' '.join(sys.argv[1:])
blocks = [
    {'type': 'section', 'text': {'type': 'mrkdwn', 'text': f':rotating_light: *BlackRoad Alert*\n{msg}'}},
    {'type': 'context', 'elements': [{'type': 'mrkdwn', 'text': os.environ['NOW_UTC'] + ' | slack-alert.sh'}]}
]
print(json.dumps({'blocks': blocks}))
" "$@")
  slack_alert "$payload"
  ok "Alert posted: $*"
  exit 0
fi

# ─── Git patrol mode ─────────────────────────────────────────────────
if [ "${1:-}" = "git-patrol" ]; then
  AGENT_SCRIPT="$(dirname "$0")/../agents/git-agent.sh"
  if [ ! -x "$AGENT_SCRIPT" ]; then
    err "git-agent.sh not found"
    exit 1
  fi
  # Agent output goes to Python via the environment, not source splicing.
  PATROL_OUTPUT=$(bash "$AGENT_SCRIPT" health 2>&1)
  FLEET_OUTPUT=$(bash "$AGENT_SCRIPT" fleet status 2>&1)
  export PATROL_OUTPUT FLEET_OUTPUT
  payload=$(python3 << 'PYEOF'
import json, os, re

patrol = os.environ.get('PATROL_OUTPUT', '')
fleet = os.environ.get('FLEET_OUTPUT', '')

# Lines flagged with ✗ are issues; strip ANSI color codes first.
issues = []
for line in patrol.split('\n'):
    if '✗' in line:
        clean = re.sub(r'\033\[[0-9;]*m', '', line).strip()
        if clean:
            issues.append(clean.lstrip('✗ '))

# Fleet summary lines look like "node: repos=N dirty=N ...".
fleet_lines = []
for line in fleet.split('\n'):
    clean = re.sub(r'\033\[[0-9;]*m', '', line).strip()
    if 'repos=' in clean:
        fleet_lines.append(clean.lstrip('✓ '))

health_text = '\n'.join(f'• {i}' for i in issues) if issues else ':white_check_mark: All repos clean'
fleet_text = '\n'.join(f'• {l}' for l in fleet_lines) if fleet_lines else 'No fleet data'

blocks = [
    {'type': 'header', 'text': {'type': 'plain_text', 'text': 'Git Agent Patrol Report'}},
    {'type': 'section', 'fields': [
        {'type': 'mrkdwn', 'text': f':mag: *Local Repos*\n{health_text}'},
        {'type': 'mrkdwn', 'text': f':satellite: *Fleet Repos*\n{fleet_text}'},
    ]},
    {'type': 'context', 'elements': [
        {'type': 'mrkdwn', 'text': os.environ.get('NOW_UTC', '') + ' | git-agent patrol'}
    ]}
]
print(json.dumps({'blocks': blocks}))
PYEOF
)
  slack_alert "$payload"
  ok "Git patrol posted to Slack"
  exit 0
fi

# ─── Auto-detect mode — scan latest KPIs for alertable issues ────────
DAILY=$(today_file)
[ ! -f "$DAILY" ] && { err "No daily data"; exit 1; }
export DAILY
alerts=$(python3 << 'PYEOF'
import json, os
with open(os.environ['DAILY']) as f:
    s = json.load(f).get('summary', {})
alerts = []
# Fleet nodes down
offline = s.get('fleet_offline', [])
if offline:
    alerts.append({
        'severity': 'critical',
        'emoji': ':rotating_light:',
        'text': f"*Nodes offline*: {', '.join(offline)}"
    })
# Fleet degraded
online = s.get('fleet_online', 0)
total = s.get('fleet_total', 4)
if online < total and not offline:
    alerts.append({
        'severity': 'warning',
        'emoji': ':large_yellow_circle:',
        'text': f"*Fleet degraded*: {online}/{total} online"
    })
# Failed systemd units
failed = s.get('failed_units', 0)
if failed > 0:
    alerts.append({
        'severity': 'warning',
        'emoji': ':warning:',
        'text': f"*{failed} failed systemd units*"
    })
# Throttled nodes (undervoltage/thermal)
throttled = s.get('throttled_nodes', [])
if throttled:
    alerts.append({
        'severity': 'warning',
        'emoji': ':zap:',
        'text': f"*Throttled nodes*: {', '.join(throttled)}"
    })
# High temperature
temp = s.get('avg_temp_c', 0)
if temp > 70:
    alerts.append({
        'severity': 'critical' if temp > 80 else 'warning',
        'emoji': ':fire:',
        'text': f"*High fleet temp*: {temp:.1f}C avg"
    })
# Disk pressure (fleet)
disk_used = s.get('fleet_disk_used_gb', 0)
disk_total = s.get('fleet_disk_total_gb', 1)
if disk_total > 0 and (disk_used / disk_total) > 0.85:
    pct = round(disk_used / disk_total * 100)
    alerts.append({
        'severity': 'warning',
        'emoji': ':floppy_disk:',
        'text': f"*Fleet disk {pct}%*: {disk_used}/{disk_total} GB"
    })
# Mac disk pressure
mac_pct = s.get('mac_disk_pct', 0)
if mac_pct > 85:
    alerts.append({
        'severity': 'warning',
        'emoji': ':computer:',
        'text': f"*Mac disk at {mac_pct}%*"
    })
# Low autonomy score
score = s.get('autonomy_score', 0)
if score < 30:
    alerts.append({
        'severity': 'warning',
        'emoji': ':robot_face:',
        'text': f"*Low autonomy score*: {score}/100"
    })
# Too many service restarts (possible crash loop)
restarts = s.get('service_restarts_today', 0)
if restarts > 100:
    alerts.append({
        'severity': 'warning',
        'emoji': ':repeat:',
        'text': f"*{restarts} service restarts today* — possible crash loop"
    })
print(json.dumps(alerts))
PYEOF
)
if [ "$alerts" = "[]" ]; then
  log "No alerts to send"
  exit 0
fi
# Build and send alert payload — the alerts JSON is handed over via env.
export ALERTS_JSON="$alerts"
payload=$(python3 << 'PYEOF'
import json, os

alerts = json.loads(os.environ['ALERTS_JSON'])
text_lines = [f"{a['emoji']} {a['text']}" for a in alerts]
# Overall severity is the worst individual severity.
severity = 'critical' if any(a['severity'] == 'critical' for a in alerts) else 'warning'
header_emoji = ':rotating_light:' if severity == 'critical' else ':warning:'
blocks = [
    {'type': 'header', 'text': {'type': 'plain_text', 'text': f'{header_emoji} BlackRoad Fleet Alert'}},
    {'type': 'section', 'text': {'type': 'mrkdwn', 'text': chr(10).join(text_lines)}},
    {'type': 'context', 'elements': [
        {'type': 'mrkdwn', 'text': os.environ.get('NOW_UTC', '') + ' | slack-alert.sh auto-detect'}
    ]}
]
print(json.dumps({'blocks': blocks}))
PYEOF
)
# Dedup on the whole alert set so identical conditions aren't re-posted
# within the dedup window. `md5` is macOS; `md5sum` is the Linux fallback.
alert_key=$(echo "$alerts" | md5 2>/dev/null || echo "$alerts" | md5sum | cut -d' ' -f1)
if slack_dedup "$alert_key"; then
  slack_alert "$payload"
  ok "Alert posted to Slack ($(echo "$alerts" | python3 -c 'import json,sys; print(len(json.load(sys.stdin)))') issues)"
else
  log "Alert suppressed (already sent within 1 hour)"
fi

1
bin/slack-alert Symbolic link
View File

@@ -0,0 +1 @@
/Users/alexa/blackroad-os-kpis/reports/slack-alert.sh

View File

@@ -1,236 +0,0 @@
#!/bin/bash
# Post daily KPI report to Slack (blackroadosinc.slack.com)
# Enhanced: trend deltas, fleet health, git-agent status, alert severity
#
# Requires SLACK_WEBHOOK_URL env var or ~/.blackroad/slack-webhook.env
# Optional: SLACK_ALERTS_WEBHOOK_URL for #alerts channel
#
# Reads today's (and, when present, yesterday's) summary JSON produced by
# the collectors, builds a Slack Block Kit payload in Python, and posts it.
source "$(dirname "$0")/../lib/common.sh"
source "$(dirname "$0")/../lib/slack.sh"
slack_load
if ! slack_ready; then
  err "Slack not configured. Run: bash scripts/setup-slack.sh"
  exit 1
fi
DAILY=$(today_file)
if [ ! -f "$DAILY" ]; then
  err "No daily data for $TODAY. Run: bash collectors/collect-all.sh"
  exit 1
fi
# Yesterday's date: BSD date (-v) first, GNU date (-d) as fallback.
YESTERDAY=$(date -v-1d +%Y-%m-%d 2>/dev/null || date -d '1 day ago' +%Y-%m-%d)
YESTERDAY_FILE="$DATA_DIR/daily/${YESTERDAY}.json"
GIT_AGENT_LOG="$HOME/.blackroad/logs/git-agent.log"
export DAILY YESTERDAY_FILE GIT_AGENT_LOG
# ─── Build Slack payload ─────────────────────────────────────────────
payload=$(python3 << 'PYEOF'
import json, os, glob

daily_file = os.environ.get('DAILY', '')
yesterday_file = os.environ.get('YESTERDAY_FILE', '')
git_log = os.environ.get('GIT_AGENT_LOG', '')

with open(daily_file) as f:
    data = json.load(f)
# NOTE(review): direct indexing below (s['commits_today'] etc.) assumes
# the collector always writes these keys — confirm against collect-all.sh.
s = data['summary']

# Yesterday's data for deltas
ys = {}
if yesterday_file and os.path.exists(yesterday_file):
    with open(yesterday_file) as f:
        ys = json.load(f).get('summary', {})

def delta(key, invert=False):
    """Show delta from yesterday: +N or -N"""
    curr = s.get(key, 0)
    prev = ys.get(key, 0)
    if not prev or not isinstance(curr, (int, float)):
        return ''
    diff = curr - prev
    if diff == 0:
        return ''
    sign = '+' if diff > 0 else ''
    emoji = ''
    if invert:  # lower is better (failed_units, throttled)
        emoji = ' :small_red_triangle:' if diff > 0 else ' :small_red_triangle_down:'
    else:
        emoji = ' :chart_with_upwards_trend:' if diff > 0 else ' :chart_with_downwards_trend:'
    return f" ({sign}{diff}{emoji})"

def fmt(n):
    # Human-friendly number formatting: one decimal for floats,
    # thousands separators for large ints.
    if isinstance(n, float):
        return f"{n:,.1f}"
    if isinstance(n, int) and n >= 1000:
        return f"{n:,}"
    return str(n)

# Fleet status emoji: green = all online, red = at most one, yellow else.
fleet_online = s.get('fleet_online', 0)
fleet_total = s.get('fleet_total', 4)
fleet_emoji = ':large_green_circle:' if fleet_online == fleet_total else ':red_circle:' if fleet_online <= 1 else ':large_yellow_circle:'

# Autonomy score bar: 10 squares, one filled per 10 points.
score = s.get('autonomy_score', 0)
filled = score // 10
score_bar = ':black_large_square:' * filled + ':white_large_square:' * (10 - filled)

# Git agent last patrol: last "PATROL:" line of the agent log, with the
# leading "[timestamp] " prefix stripped.
git_status = 'No patrol data'
if git_log and os.path.exists(git_log):
    with open(git_log) as f:
        lines = f.readlines()
    patrols = [l.strip() for l in lines if 'PATROL:' in l]
    if patrols:
        last = patrols[-1]
        git_status = last.split('] ', 1)[-1] if '] ' in last else last

# Alerts
alerts = []
offline = s.get('fleet_offline', [])
if offline:
    alerts.append(f":rotating_light: *Nodes offline*: {', '.join(offline)}")
if s.get('failed_units', 0) > 0:
    alerts.append(f":warning: *{s['failed_units']} failed systemd units*")
throttled = s.get('throttled_nodes', [])
if throttled:
    alerts.append(f":fire: *Throttled*: {', '.join(throttled)}")
if s.get('avg_temp_c', 0) > 70:
    alerts.append(f":thermometer: *High temp*: {s['avg_temp_c']}C avg")
if fleet_online < fleet_total:
    alerts.append(f":satellite: *Fleet degraded*: {fleet_online}/{fleet_total} online")
alert_text = '\n'.join(alerts) if alerts else ':white_check_mark: All systems nominal'

# Weekly trend (last 7 days of commits)
trend_commits = []
daily_dir = os.path.dirname(daily_file)
for f in sorted(glob.glob(os.path.join(daily_dir, '*.json')))[-7:]:
    try:
        with open(f) as fh:
            d = json.load(fh)
        trend_commits.append(d.get('summary', {}).get('commits_today', 0))
    except:  # best-effort: a malformed day file just drops out of the trend
        pass
sparkline = ''
if trend_commits:
    max_c = max(trend_commits) or 1
    bars = ['_', '▁', '▂', '▃', '▄', '▅', '▆', '▇', '█']
    sparkline = ''.join(bars[min(8, int(c / max_c * 8))] for c in trend_commits)
    sparkline = f"`{sparkline}` (7d commits)"

blocks = [
    {
        "type": "header",
        "text": {"type": "plain_text", "text": f"BlackRoad OS — Daily KPIs {data['date']}"}
    },
    # ── Alerts section ──
    {
        "type": "section",
        "text": {"type": "mrkdwn", "text": alert_text}
    },
    {"type": "divider"},
    # ── Code velocity ──
    {
        "type": "section",
        "fields": [
            {"type": "mrkdwn", "text": f":rocket: *Code Velocity*\n"
                f"Commits: *{s['commits_today']}*{delta('commits_today')}\n"
                f"PRs merged: *{s['prs_merged_today']}*{delta('prs_merged_today')}\n"
                f"PRs open: {s['prs_open']}{delta('prs_open')}\n"
                f"Events: {s.get('github_events_today', 0)}{delta('github_events_today')}"},
            {"type": "mrkdwn", "text": f":bar_chart: *Scale*\n"
                f"LOC: *{fmt(s['total_loc'])}*{delta('total_loc')}\n"
                f"Repos: *{s['repos_total']}* ({s['repos_github']} GH + {s['repos_gitea']} Gitea){delta('repos_total')}\n"
                f"Languages: {s.get('github_language_count', 0)}\n"
                f"{sparkline}"},
        ]
    },
    # ── Fleet + Services ──
    {
        "type": "section",
        "fields": [
            {"type": "mrkdwn", "text": f"{fleet_emoji} *Fleet*\n"
                f"Online: *{fleet_online}/{fleet_total}*{delta('fleet_online')}\n"
                f"Temp: {s.get('avg_temp_c', 0):.1f}C\n"
                f"Mem: {s.get('fleet_mem_used_mb', 0)}/{s.get('fleet_mem_total_mb', 0)} MB\n"
                f"Disk: {s.get('fleet_disk_used_gb', 0)}/{s.get('fleet_disk_total_gb', 0)} GB"},
            {"type": "mrkdwn", "text": f":gear: *Services*\n"
                f"Ollama: *{s.get('ollama_models', 0)}* models ({s.get('ollama_size_gb', 0):.1f} GB)\n"
                f"Docker: {s.get('docker_containers', 0)} containers\n"
                f"Systemd: {s.get('systemd_services', 0)} svc / {s.get('systemd_timers', 0)} timers\n"
                f"Nginx: {s.get('nginx_sites', 0)} sites"},
        ]
    },
    # ── Autonomy + Cloud ──
    {
        "type": "section",
        "fields": [
            {"type": "mrkdwn", "text": f":robot_face: *Autonomy*\n"
                f"Score: *{score}/100*{delta('autonomy_score')}\n"
                f"{score_bar}\n"
                f"Heals: {s.get('heal_events_today', 0)} | Restarts: {s.get('service_restarts_today', 0)}\n"
                f"Crons: {s.get('fleet_cron_jobs', 0)} | Uptime: {s.get('max_uptime_days', 0)}d"},
            {"type": "mrkdwn", "text": f":cloud: *Cloudflare*\n"
                f"Pages: {s.get('cf_pages', 0)}{delta('cf_pages')}\n"
                f"D1: {s.get('cf_d1_databases', 0)} | KV: {s.get('cf_kv_namespaces', 0)}\n"
                f"R2: {s.get('cf_r2_buckets', 0)}\n"
                f"DBs total: {s.get('sqlite_dbs', 0)} SQLite + {s.get('postgres_dbs', 0)} PG + {s.get('cf_d1_databases', 0)} D1"},
        ]
    },
    # ── Local Mac ──
    {
        "type": "section",
        "fields": [
            {"type": "mrkdwn", "text": f":computer: *Local Mac*\n"
                f"CLI tools: {s.get('bin_tools', 0)} | Scripts: {s.get('home_scripts', 0)}\n"
                f"Git repos: {s.get('local_git_repos', 0)}\n"
                f"Disk: {s.get('mac_disk_used_gb', 0)} GB ({s.get('mac_disk_pct', 0)}%)\n"
                f"Processes: {s.get('mac_processes', 0)}"},
            {"type": "mrkdwn", "text": f":file_cabinet: *Data*\n"
                f"SQLite DBs: {s.get('sqlite_dbs', 0)}\n"
                f"Total DB rows: {fmt(s.get('total_db_rows', 0))}\n"
                f"FTS5 entries: {fmt(s.get('fts5_entries', 0))}\n"
                f"Packages: {s.get('brew_packages', 0)} brew / {s.get('pip_packages', 0)} pip / {s.get('npm_global_packages', 0)} npm"},
        ]
    },
    {"type": "divider"},
    # ── Git agent status ──
    {
        "type": "context",
        "elements": [
            {"type": "mrkdwn", "text": f":satellite_antenna: Git Agent: _{git_status}_ | Collected {data['collected_at']}"}
        ]
    }
]
payload = {"blocks": blocks}
print(json.dumps(payload))
PYEOF
)
if [ -z "$payload" ]; then
  err "Failed to build Slack payload"
  exit 1
fi
# ─── Post to Slack ───────────────────────────────────────────────────
log "Posting daily report to Slack..."
if slack_post "$payload"; then
  ok "Daily report posted to Slack"
else
  err "Slack post failed"
fi

1
bin/slack-notify Symbolic link
View File

@@ -0,0 +1 @@
/Users/alexa/blackroad-os-kpis/reports/slack-notify.sh

View File

@@ -1,191 +0,0 @@
#!/bin/bash
# Weekly Slack digest — posts Sunday night summary of the week's KPIs
# Compares this week vs last week, shows trends and highlights
#
# Requires: lib/common.sh (log/ok/err helpers — presumed, verify) and
# lib/slack.sh (slack_load / slack_ready / slack_post).
# NOTE(review): no `set -euo pipefail` — sourced-lib failures are silently
# tolerated; confirm before tightening, since common.sh behavior is unknown.
source "$(dirname "$0")/../lib/common.sh"
source "$(dirname "$0")/../lib/slack.sh"
# Load webhook configuration; bail out early when Slack is not set up.
slack_load
if ! slack_ready; then
  err "Slack not configured"
  exit 1
fi
# Make DATA_DIR visible to the python3 heredoc below, which reads the
# daily summary JSON files from it.
export DATA_DIR
payload=$(python3 << 'PYEOF'
# Build the weekly-digest Slack payload and print it as JSON on stdout.
# Input: $DATA_DIR/daily/*.json, each expected to carry {"date": ..., "summary": {...}}.
import json, os, glob
from datetime import datetime, timedelta
data_dir = os.environ.get('DATA_DIR', 'data')
daily_dir = os.path.join(data_dir, 'daily')
# Load all daily files
# dailies maps 'YYYY-MM-DD' -> that day's "summary" dict.
dailies = {}
for f in sorted(glob.glob(os.path.join(daily_dir, '*.json'))):
    try:
        with open(f) as fh:
            d = json.load(fh)
        dailies[d['date']] = d.get('summary', {})
    except:
        # Best-effort: skip unreadable or malformed daily files.
        pass
if not dailies:
    # No data at all — emit '{}' so the shell wrapper can detect and abort.
    print('{}')
    exit()
today = datetime.now()
# This week = last 7 days, Last week = 7-14 days ago
# Both lists are built newest-first (i counts backwards from today).
this_week = []
last_week = []
for i in range(7):
    day = (today - timedelta(days=i)).strftime('%Y-%m-%d')
    if day in dailies:
        this_week.append(dailies[day])
for i in range(7, 14):
    day = (today - timedelta(days=i)).strftime('%Y-%m-%d')
    if day in dailies:
        last_week.append(dailies[day])
def week_sum(data, key):
    # Total of `key` across all day-summaries; a missing key counts as 0.
    total = 0
    for summary in data:
        total += summary.get(key, 0)
    return total
def week_avg(data, key):
    # Mean of the non-zero values of `key`, rounded to 1 decimal place.
    # Zero/missing days are deliberately excluded; returns 0 when no samples.
    samples = []
    for summary in data:
        value = summary.get(key, 0)
        if value:
            samples.append(value)
    if not samples:
        return 0
    return round(sum(samples) / len(samples), 1)
def week_max(data, key):
    # Largest value of `key` across the week; 0 for an empty window.
    return max((summary.get(key, 0) for summary in data), default=0)
def week_last(data, key):
    # Value from the first entry — callers pass newest-first lists, so this
    # is the most recent day's value; 0 when the list is empty.
    if not data:
        return 0
    return data[0].get(key, 0)
def trend(curr, prev):
    # Week-over-week delta suffix for a metric, e.g.
    # ' (+5, +100%) :chart_with_upwards_trend:'.
    # Returns '' when there is no previous-week baseline (prev falsy),
    # and ' (=)' when the value is unchanged.
    if not prev:
        return ''
    diff = curr - prev
    pct = round(diff / prev * 100)
    if diff == 0:
        return ' (=)'
    if diff > 0:
        return f' (+{diff}, +{pct}%) :chart_with_upwards_trend:'
    return f' ({diff}, {pct}%) :chart_with_downwards_trend:'
# Key metrics
# Weekly totals for this week vs last, used for trend() comparisons below.
tw_commits = week_sum(this_week, 'commits_today')
lw_commits = week_sum(last_week, 'commits_today')
tw_prs = week_sum(this_week, 'prs_merged_today')
lw_prs = week_sum(last_week, 'prs_merged_today')
tw_events = week_sum(this_week, 'github_events_today')
lw_events = week_sum(last_week, 'github_events_today')
# Latest values
# this_week is newest-first, so index 0 is the most recent day with data.
latest = this_week[0] if this_week else {}
loc = latest.get('total_loc', 0)
repos = latest.get('repos_total', 0)
fleet = latest.get('fleet_online', 0)
fleet_total = latest.get('fleet_total', 4)
autonomy = latest.get('autonomy_score', 0)
models = latest.get('ollama_models', 0)
# Sparkline for commits
# One bar per calendar day, oldest first; days with no data render as 0.
commit_vals = []
for i in range(6, -1, -1):
    day = (today - timedelta(days=i)).strftime('%Y-%m-%d')
    commit_vals.append(dailies.get(day, {}).get('commits_today', 0))
bars = ['_', '▁', '▂', '▃', '▄', '▅', '▆', '▇', '█']
# `or 1` guards the division when every day had zero commits.
max_c = max(commit_vals) or 1
# Scale each count to a bar index 0..8; min() clamps the c == max_c case.
sparkline = ''.join(bars[min(8, int(c / max_c * 8))] for c in commit_vals)
# Uptime percentage
# Ratio of node-days online to total node-days over the days with data.
fleet_days = [d.get('fleet_online', 0) for d in this_week]
fleet_totals = [d.get('fleet_total', 4) for d in this_week]
uptime_pct = round(sum(fleet_days) / sum(fleet_totals) * 100) if sum(fleet_totals) > 0 else 0
# Build blocks
# Slack Block Kit layout: header, two 2-column sections, highlights, context.
week_start = (today - timedelta(days=6)).strftime('%b %d')
week_end = today.strftime('%b %d')
blocks = [
    {
        "type": "header",
        "text": {"type": "plain_text", "text": f"BlackRoad OS — Weekly Digest ({week_start} - {week_end})"}
    },
    # Velocity
    {
        "type": "section",
        "fields": [
            {"type": "mrkdwn", "text":
                f":rocket: *Code Velocity (7d)*\n"
                f"Commits: *{tw_commits}*{trend(tw_commits, lw_commits)}\n"
                f"PRs merged: *{tw_prs}*{trend(tw_prs, lw_prs)}\n"
                f"GH events: *{tw_events}*{trend(tw_events, lw_events)}\n"
                f"`{sparkline}` daily commits"},
            {"type": "mrkdwn", "text":
                f":bar_chart: *Current State*\n"
                f"LOC: *{loc:,}*\n"
                f"Repos: *{repos}*\n"
                f"Languages: {latest.get('github_language_count', 0)}\n"
                f"Autonomy: *{autonomy}/100*"},
        ]
    },
    # Fleet + infra
    {
        "type": "section",
        "fields": [
            {"type": "mrkdwn", "text":
                f":satellite: *Fleet*\n"
                f"Online: {fleet}/{fleet_total}\n"
                f"Uptime: {uptime_pct}% this week\n"
                f"Avg temp: {week_avg(this_week, 'avg_temp_c')}C\n"
                f"Max uptime: {week_max(this_week, 'max_uptime_days')}d"},
            {"type": "mrkdwn", "text":
                f":gear: *Services*\n"
                f"Ollama: {models} models\n"
                f"Docker: {latest.get('docker_containers', 0)} containers\n"
                f"Nginx: {latest.get('nginx_sites', 0)} sites\n"
                f"DBs: {latest.get('sqlite_dbs', 0)} SQLite + {latest.get('cf_d1_databases', 0)} D1"},
        ]
    },
    # Highlights
    {
        "type": "section",
        "text": {"type": "mrkdwn", "text":
            f":sparkles: *Week Highlights*\n"
            f"• {len(this_week)} days of data collected\n"
            f"• Peak commits: {max(commit_vals)} in a single day\n"
            f"• Total heal events: {week_sum(this_week, 'heal_events_today')}\n"
            f"• Service restarts: {week_sum(this_week, 'service_restarts_today')}"}
    },
    {"type": "divider"},
    {
        "type": "context",
        "elements": [
            {"type": "mrkdwn", "text": f":calendar: Week of {week_start} | blackroad-os-kpis weekly digest"}
        ]
    }
]
print(json.dumps({"blocks": blocks}))
PYEOF
)
# '{}' is the Python script's explicit "no data" sentinel; an empty string
# means the heredoc crashed before printing anything. Abort in either case.
if [ -z "$payload" ] || [ "$payload" = "{}" ]; then
  err "Not enough data for weekly digest"
  exit 1
fi
log "Posting weekly digest to Slack..."
if slack_post "$payload"; then
  ok "Weekly digest posted"
else
  # NOTE(review): failure is logged but the script still exits 0 here —
  # confirm that is intentional for the cron caller.
  err "Weekly digest failed"
fi

1
bin/slack-weekly Symbolic link
View File

@@ -0,0 +1 @@
/Users/alexa/blackroad-os-kpis/reports/slack-weekly.sh

View File

@@ -31,3 +31,4 @@
# blackroad-os-kpis: slack notification after collection # blackroad-os-kpis: slack notification after collection
5 6 * * * cd /Users/alexa/blackroad-os-kpis && bash reports/slack-notify.sh >> /Users/alexa/blackroad-os-kpis/data/cron.log 2>&1 5 6 * * * cd /Users/alexa/blackroad-os-kpis && bash reports/slack-notify.sh >> /Users/alexa/blackroad-os-kpis/data/cron.log 2>&1
0 */6 * * * python3 /Users/alexa/blackroad-operator/tools/search/index-all.py --rebuild > /dev/null 2>&1 0 */6 * * * python3 /Users/alexa/blackroad-operator/tools/search/index-all.py --rebuild > /dev/null 2>&1
0 8 1 * * bash /Users/alexa/blackroad-operator/scripts/corporate-autopilot.sh >> /Users/alexa/blackroad-operator/docs/corporate/autopilot.log 2>&1

View File

@@ -1,50 +1,50 @@
[2026-03-14 16:44:01] [BEAT] [alice] load=0.89 mem=3306/3794MB temp=34.1C disk=87%
[2026-03-14 16:45:02] [BEAT] [alice] load=0.44 mem=3297/3794MB temp=36.0C disk=87%
[2026-03-14 16:45:38] [BEAT] [alice] load=1.27 mem=3302/3794MB temp=35.0C disk=87%
[2026-03-14 16:45:43] [BEAT] [alice] load=1.89 mem=3303/3794MB temp=34.1C disk=87%
[2026-03-14 16:45:47] [DIAL] [alice] Switchboard unreachable
[2026-03-14 16:46:01] [BEAT] [alice] load=1.99 mem=3304/3794MB temp=34.1C disk=87%
[2026-03-14 16:47:02] [BEAT] [alice] load=1.21 mem=3304/3794MB temp=35.5C disk=87%
[2026-03-14 16:48:01] [BEAT] [alice] load=1.30 mem=3304/3794MB temp=35.5C disk=87%
[2026-03-14 16:49:01] [BEAT] [alice] load=0.77 mem=3301/3794MB temp=35.5C disk=87%
[2026-03-14 16:50:01] [FLEET] [alice] Starting cross-node health check
[2026-03-14 16:50:01] [BEAT] [alice] load=0.86 mem=3284/3794MB temp=36.0C disk=87%
[2026-03-14 16:50:03] [FLEET] [alice] octavia: DOWN (no ping response)
[2026-03-14 16:50:04] [FLEET] [alice] cecilia: UP temp=47C mem=4836MB disk=19%
[2026-03-14 16:50:07] [FLEET] [alice] gematria: UP temp=C mem=4189MB disk=67%
[2026-03-14 16:50:07] [FLEET] [alice] lucidia: UP temp=56C mem=1180MB disk=32%
[2026-03-14 16:50:07] [FLEET] [alice] aria: UP temp=55C mem=6936MB disk=81%
[2026-03-14 16:50:08] [FLEET] [alice] anastasia: UP temp=C mem=344MB disk=69%
[2026-03-14 16:50:47] [BEAT] [alice] load=0.58 mem=3303/3794MB temp=34.6C disk=87%
[2026-03-14 16:50:47] [BEAT] [alice] load=0.58 mem=3303/3794MB temp=34.6C disk=87%
[2026-03-14 16:51:01] [BEAT] [alice] load=0.75 mem=3301/3794MB temp=36.5C disk=87%
[2026-03-14 16:52:02] [BEAT] [alice] load=0.48 mem=3303/3794MB temp=35.5C disk=87%
[2026-03-14 16:53:01] [BEAT] [alice] load=0.63 mem=3300/3794MB temp=35.0C disk=87%
[2026-03-14 16:54:01] [BEAT] [alice] load=0.83 mem=3303/3794MB temp=33.1C disk=87%
[2026-03-14 16:55:01] [BEAT] [alice] load=0.96 mem=3303/3794MB temp=35.0C disk=87%
[2026-03-14 16:55:53] [DIAL] [alice] Switchboard unreachable
[2026-03-14 16:55:56] [BEAT] [alice] load=1.15 mem=3302/3794MB temp=34.1C disk=87%
[2026-03-14 16:55:57] [BEAT] [alice] load=1.15 mem=3302/3794MB temp=33.6C disk=87%
[2026-03-14 16:56:01] [BEAT] [alice] load=1.14 mem=3302/3794MB temp=35.0C disk=87%
[2026-03-14 16:57:02] [BEAT] [alice] load=1.86 mem=3300/3794MB temp=33.6C disk=87%
[2026-03-14 16:58:01] [BEAT] [alice] load=0.79 mem=3301/3794MB temp=33.1C disk=87%
[2026-03-14 16:59:01] [BEAT] [alice] load=1.02 mem=3302/3794MB temp=32.1C disk=87%
[2026-03-14 17:00:01] [FLEET] [alice] Starting cross-node health check
[2026-03-14 17:00:01] [BEAT] [alice] load=0.54 mem=3292/3794MB temp=35.0C disk=87%
[2026-03-14 17:00:03] [FLEET] [alice] octavia: DOWN (no ping response)
[2026-03-14 17:00:04] [FLEET] [alice] cecilia: UP temp=40C mem=4752MB disk=19%
[2026-03-14 17:00:07] [FLEET] [alice] gematria: UP temp=C mem=4176MB disk=67%
[2026-03-14 17:00:08] [FLEET] [alice] lucidia: UP temp=56C mem=1198MB disk=32%
[2026-03-14 17:00:08] [FLEET] [alice] aria: UP temp=51C mem=6928MB disk=81%
[2026-03-14 17:00:09] [FLEET] [alice] anastasia: UP temp=C mem=345MB disk=69%
[2026-03-14 17:00:47] [DIAL] [alice] Switchboard unreachable
[2026-03-14 17:01:01] [BEAT] [alice] load=2.14 mem=3304/3794MB temp=33.6C disk=87%
[2026-03-14 17:01:07] [BEAT] [alice] load=1.97 mem=3303/3794MB temp=33.1C disk=87%
[2026-03-14 17:01:07] [BEAT] [alice] load=1.97 mem=3303/3794MB temp=33.6C disk=87%
[2026-03-14 17:02:01] [BEAT] [alice] load=1.03 mem=3300/3794MB temp=33.6C disk=87%
[2026-03-14 17:03:08] [BEAT] [alice] load=2.25 mem=3304/3794MB temp=33.1C disk=87%
[2026-03-14 17:04:01] [BEAT] [alice] load=6.44 mem=3302/3794MB temp=35.5C disk=87% [2026-03-14 17:04:01] [BEAT] [alice] load=6.44 mem=3302/3794MB temp=35.5C disk=87%
[2026-03-14 17:05:01] [BEAT] [alice] load=2.91 mem=3299/3794MB temp=34.1C disk=87% [2026-03-14 17:05:01] [BEAT] [alice] load=2.91 mem=3299/3794MB temp=34.1C disk=87%
[2026-03-14 17:06:02] [BEAT] [alice] load=1.57 mem=3300/3794MB temp=36.0C disk=87% [2026-03-14 17:06:02] [BEAT] [alice] load=1.57 mem=3300/3794MB temp=36.0C disk=87%
[2026-03-14 17:06:16] [BEAT] [alice] load=1.43 mem=3302/3794MB temp=34.6C disk=87% [2026-03-14 17:06:16] [BEAT] [alice] load=1.43 mem=3302/3794MB temp=34.6C disk=87%
[2026-03-14 17:06:16] [BEAT] [alice] load=1.43 mem=3302/3794MB temp=34.6C disk=87% [2026-03-14 17:06:16] [BEAT] [alice] load=1.43 mem=3302/3794MB temp=34.6C disk=87%
[2026-03-14 17:07:01] [BEAT] [alice] load=1.46 mem=3303/3794MB temp=34.1C disk=87%
[2026-03-14 17:08:01] [BEAT] [alice] load=1.36 mem=3295/3794MB temp=35.5C disk=87%
[2026-03-14 17:09:01] [BEAT] [alice] load=1.03 mem=3290/3794MB temp=34.1C disk=87%
[2026-03-14 17:10:01] [FLEET] [alice] Starting cross-node health check
[2026-03-14 17:10:01] [BEAT] [alice] load=0.90 mem=3301/3794MB temp=35.5C disk=87%
[2026-03-14 17:10:03] [FLEET] [alice] octavia: DOWN (no ping response)
[2026-03-14 17:10:03] [FLEET] [alice] cecilia: UP temp=39C mem=4866MB disk=19%
[2026-03-14 17:10:06] [FLEET] [alice] gematria: UP temp=C mem=4180MB disk=67%
[2026-03-14 17:10:07] [FLEET] [alice] lucidia: UP temp=63C mem=1236MB disk=32%
[2026-03-14 17:10:08] [FLEET] [alice] aria: UP temp=52C mem=6933MB disk=81%
[2026-03-14 17:10:09] [FLEET] [alice] anastasia: UP temp=C mem=343MB disk=69%
[2026-03-14 17:10:53] [DIAL] [alice] Switchboard unreachable
[2026-03-14 17:11:01] [BEAT] [alice] load=0.62 mem=3298/3794MB temp=32.6C disk=87%
[2026-03-14 17:11:25] [BEAT] [alice] load=0.57 mem=3304/3794MB temp=35.0C disk=87%
[2026-03-14 17:11:25] [BEAT] [alice] load=0.57 mem=3304/3794MB temp=34.6C disk=87%
[2026-03-14 17:12:01] [BEAT] [alice] load=0.56 mem=3305/3794MB temp=34.6C disk=87%
[2026-03-14 17:13:01] [BEAT] [alice] load=0.74 mem=3306/3794MB temp=33.6C disk=87%
[2026-03-14 17:14:01] [BEAT] [alice] load=0.80 mem=3304/3794MB temp=33.6C disk=87%
[2026-03-14 17:15:01] [BEAT] [alice] load=0.49 mem=3297/3794MB temp=34.6C disk=87%
[2026-03-14 17:15:47] [DIAL] [alice] Switchboard unreachable
[2026-03-14 17:16:01] [BEAT] [alice] load=0.69 mem=3293/3794MB temp=34.6C disk=87%
[2026-03-14 17:16:39] [BEAT] [alice] load=0.54 mem=3303/3794MB temp=34.1C disk=87%
[2026-03-14 17:16:39] [BEAT] [alice] load=0.54 mem=3302/3794MB temp=33.6C disk=87%
[2026-03-14 17:17:01] [BEAT] [alice] load=0.47 mem=3305/3794MB temp=33.6C disk=87%
[2026-03-14 17:18:01] [BEAT] [alice] load=0.71 mem=3306/3794MB temp=34.1C disk=87%
[2026-03-14 17:19:01] [BEAT] [alice] load=0.59 mem=3302/3794MB temp=35.0C disk=87%
[2026-03-14 17:20:01] [FLEET] [alice] Starting cross-node health check
[2026-03-14 17:20:02] [BEAT] [alice] load=0.68 mem=3285/3794MB temp=35.5C disk=87%
[2026-03-14 17:20:03] [FLEET] [alice] octavia: DOWN (no ping response)
[2026-03-14 17:20:04] [FLEET] [alice] cecilia: UP temp=39C mem=4936MB disk=19%
[2026-03-14 17:20:07] [FLEET] [alice] gematria: UP temp=C mem=4177MB disk=67%
[2026-03-14 17:20:08] [FLEET] [alice] lucidia: UP temp=64C mem=1289MB disk=32%
[2026-03-14 17:20:08] [FLEET] [alice] aria: UP temp=54C mem=6929MB disk=81%
[2026-03-14 17:20:09] [FLEET] [alice] anastasia: UP temp=C mem=371MB disk=69%
[2026-03-14 17:21:01] [BEAT] [alice] load=0.55 mem=3301/3794MB temp=34.6C disk=87%
[2026-03-14 17:21:47] [BEAT] [alice] load=0.44 mem=3303/3794MB temp=34.6C disk=87%
[2026-03-14 17:21:47] [BEAT] [alice] load=0.44 mem=3302/3794MB temp=33.1C disk=87%
[2026-03-14 17:22:01] [BEAT] [alice] load=0.48 mem=3303/3794MB temp=33.6C disk=87%
[2026-03-14 17:23:01] [BEAT] [alice] load=0.80 mem=3301/3794MB temp=35.5C disk=87%
[2026-03-14 17:24:01] [BEAT] [alice] load=1.13 mem=3302/3794MB temp=34.6C disk=87%
[2026-03-14 17:25:02] [BEAT] [alice] load=0.98 mem=3276/3794MB temp=37.0C disk=87%
[2026-03-14 17:25:54] [DIAL] [alice] Switchboard unreachable
[2026-03-14 17:26:01] [BEAT] [alice] load=0.54 mem=3301/3794MB temp=34.6C disk=87%
[2026-03-14 17:26:56] [BEAT] [alice] load=1.24 mem=3300/3794MB temp=35.5C disk=87%
[2026-03-14 17:26:57] [BEAT] [alice] load=1.24 mem=3300/3794MB temp=35.5C disk=87%

View File

@@ -1 +1 @@
{"node":"alice","ts":"2026-03-14T22:06:16Z","load":1.43,"mem_free_mb":3302,"mem_total_mb":3794,"temp_c":34.6,"disk_pct":87,"throttle":"0x0"} {"node":"alice","ts":"2026-03-14T22:26:56Z","load":1.24,"mem_free_mb":3300,"mem_total_mb":3794,"temp_c":35.5,"disk_pct":87,"throttle":"0x0"}

View File

@@ -1,9 +1,9 @@
{ {
"hostname": "alice", "hostname": "alice",
"ts": "2026-03-14T22:06:17Z", "ts": "2026-03-14T22:26:58Z",
"uptime_seconds": 265680, "uptime_seconds": 266921,
"kernel": "6.1.21-v8+", "kernel": "6.1.21-v8+",
"temp_c": 34.6, "temp_c": 36.0,
"memory_mb": { "memory_mb": {
"total": 3794, "total": 3794,
"used": 403, "used": 403,
@@ -11,9 +11,9 @@
}, },
"disk": "12G/15G (87%)", "disk": "12G/15G (87%)",
"load": [ "load": [
1.43, 1.24,
2.06, 0.88,
1.56 0.99
], ],
"ollama_models": [ "ollama_models": [
"qwen2.5:3b", "qwen2.5:3b",
@@ -25,5 +25,5 @@
], ],
"throttle": "0x0", "throttle": "0x0",
"voltage": "0.9160V", "voltage": "0.9160V",
"services_running": 42 "services_running": 41
} }

View File

@@ -11,7 +11,7 @@ LISTEN 0 5 0.0.0.0:8787 0.0.0.0:* users:(("python3",pid
LISTEN 0 511 0.0.0.0:80 0.0.0.0:* users:(("nginx",pid=3461172,fd=8),("nginx",pid=3461171,fd=8)) LISTEN 0 511 0.0.0.0:80 0.0.0.0:* users:(("nginx",pid=3461172,fd=8),("nginx",pid=3461171,fd=8))
LISTEN 0 4096 0.0.0.0:111 0.0.0.0:* users:(("rpcbind",pid=589,fd=4),("systemd",pid=1,fd=127)) LISTEN 0 4096 0.0.0.0:111 0.0.0.0:* users:(("rpcbind",pid=589,fd=4),("systemd",pid=1,fd=127))
LISTEN 0 4096 *:8080 *:* users:(("headscale",pid=2341808,fd=12)) LISTEN 0 4096 *:8080 *:* users:(("headscale",pid=2341808,fd=12))
LISTEN 0 511 *:3000 *:* users:(("node /srv/hello",pid=1473503,fd=19)) LISTEN 0 511 *:3000 *:* users:(("node /srv/hello",pid=1477747,fd=19))
LISTEN 0 511 *:3001 *:* users:(("node",pid=757,fd=21)) LISTEN 0 511 *:3001 *:* users:(("node",pid=757,fd=21))
LISTEN 0 128 [::]:22 [::]:* users:(("sshd",pid=991,fd=8)) LISTEN 0 128 [::]:22 [::]:* users:(("sshd",pid=991,fd=8))
LISTEN 0 511 [::]:80 [::]:* users:(("nginx",pid=3461172,fd=9),("nginx",pid=3461171,fd=9)) LISTEN 0 511 [::]:80 [::]:* users:(("nginx",pid=3461172,fd=9),("nginx",pid=3461171,fd=9))

View File

@@ -1,19 +1,19 @@
{ {
"hostname": "anastasia", "hostname": "anastasia",
"ts": "2026-03-14T22:06:18Z", "ts": "2026-03-14T22:26:59Z",
"uptime_seconds": 6667870, "uptime_seconds": 6669111,
"kernel": "5.14.0-651.el9.x86_64", "kernel": "5.14.0-651.el9.x86_64",
"temp_c": 0, "temp_c": 0,
"memory_mb": { "memory_mb": {
"total": 765, "total": 765,
"used": 399, "used": 402,
"free": 365 "free": 362
}, },
"disk": "18G/25G (69%)", "disk": "18G/25G (69%)",
"load": [ "load": [
0.22, 0.15,
0.09, 0.1,
0.08 0.09
], ],
"ollama_models": [], "ollama_models": [],
"throttle": "N/A", "throttle": "N/A",

View File

@@ -1,46 +1,3 @@
[2026-03-14 16:43:01] [BEAT] [aria] load=0.07 mem=6979/8059MB temp=53.5C disk=81%
[2026-03-14 16:44:01] [BEAT] [aria] load=0.13 mem=6980/8059MB temp=52.9C disk=81%
[2026-03-14 16:45:01] [BEAT] [aria] load=0.05 mem=6949/8059MB temp=54.0C disk=81%
[2026-03-14 16:45:34] [BEAT] [aria] load=0.19 mem=6964/8059MB temp=54.0C disk=81%
[2026-03-14 16:45:34] [BEAT] [aria] load=0.19 mem=6963/8059MB temp=54.0C disk=81%
[2026-03-14 16:45:46] [DIAL] [aria] Switchboard unreachable
[2026-03-14 16:46:01] [BEAT] [aria] load=0.11 mem=6978/8059MB temp=52.9C disk=81%
[2026-03-14 16:47:01] [BEAT] [aria] load=0.12 mem=6979/8059MB temp=54.0C disk=81%
[2026-03-14 16:48:01] [BEAT] [aria] load=0.11 mem=6978/8059MB temp=53.5C disk=81%
[2026-03-14 16:49:01] [BEAT] [aria] load=0.12 mem=6979/8059MB temp=52.9C disk=81%
[2026-03-14 16:50:02] [FLEET] [aria] Starting cross-node health check
[2026-03-14 16:50:02] [BEAT] [aria] load=0.04 mem=6950/8059MB temp=54.0C disk=81%
[2026-03-14 16:50:02] [FLEET] [aria] alice: UP temp=37C mem=3299MB disk=87%
[2026-03-14 16:50:04] [FLEET] [aria] octavia: DOWN (no ping response)
[2026-03-14 16:50:04] [FLEET] [aria] cecilia: UP temp=45C mem=4834MB disk=19%
[2026-03-14 16:50:07] [FLEET] [aria] gematria: UP temp=C mem=4188MB disk=67%
[2026-03-14 16:50:07] [FLEET] [aria] lucidia: UP temp=55C mem=1177MB disk=32%
[2026-03-14 16:50:08] [FLEET] [aria] anastasia: UP temp=C mem=344MB disk=69%
[2026-03-14 16:50:47] [BEAT] [aria] load=0.27 mem=6975/8059MB temp=53.5C disk=81%
[2026-03-14 16:50:47] [BEAT] [aria] load=0.27 mem=6972/8059MB temp=53.5C disk=81%
[2026-03-14 16:51:01] [BEAT] [aria] load=0.57 mem=6975/8059MB temp=53.5C disk=81%
[2026-03-14 16:52:01] [BEAT] [aria] load=1.07 mem=6977/8059MB temp=52.9C disk=81%
[2026-03-14 16:53:01] [BEAT] [aria] load=0.51 mem=6978/8059MB temp=52.9C disk=81%
[2026-03-14 16:54:01] [BEAT] [aria] load=0.18 mem=6976/8059MB temp=52.9C disk=81%
[2026-03-14 16:55:01] [BEAT] [aria] load=0.11 mem=6954/8059MB temp=52.9C disk=81%
[2026-03-14 16:55:56] [BEAT] [aria] load=0.04 mem=6974/8059MB temp=51.8C disk=81%
[2026-03-14 16:55:56] [BEAT] [aria] load=0.04 mem=6971/8059MB temp=52.4C disk=81%
[2026-03-14 16:56:02] [BEAT] [aria] load=0.04 mem=6972/8059MB temp=51.8C disk=81%
[2026-03-14 16:57:01] [BEAT] [aria] load=0.01 mem=6975/8059MB temp=50.7C disk=81%
[2026-03-14 16:58:01] [BEAT] [aria] load=0.12 mem=6976/8059MB temp=50.7C disk=81%
[2026-03-14 16:59:01] [BEAT] [aria] load=0.09 mem=6973/8059MB temp=50.1C disk=81%
[2026-03-14 17:00:01] [FLEET] [aria] Starting cross-node health check
[2026-03-14 17:00:01] [BEAT] [aria] load=0.09 mem=6943/8059MB temp=50.7C disk=81%
[2026-03-14 17:00:01] [FLEET] [aria] alice: UP temp=35C mem=3292MB disk=87%
[2026-03-14 17:00:03] [FLEET] [aria] octavia: DOWN (no ping response)
[2026-03-14 17:00:04] [FLEET] [aria] cecilia: UP temp=41C mem=4751MB disk=19%
[2026-03-14 17:00:05] [FLEET] [aria] gematria: UP temp=C mem=4176MB disk=67%
[2026-03-14 17:00:06] [FLEET] [aria] lucidia: UP temp=57C mem=1251MB disk=32%
[2026-03-14 17:00:07] [FLEET] [aria] anastasia: UP temp=C mem=340MB disk=69%
[2026-03-14 17:00:46] [DIAL] [aria] Switchboard unreachable
[2026-03-14 17:01:01] [BEAT] [aria] load=0.11 mem=6971/8059MB temp=51.8C disk=81%
[2026-03-14 17:01:06] [BEAT] [aria] load=0.10 mem=6974/8059MB temp=50.7C disk=81%
[2026-03-14 17:01:06] [BEAT] [aria] load=0.10 mem=6972/8059MB temp=51.2C disk=81%
[2026-03-14 17:02:01] [BEAT] [aria] load=0.04 mem=6977/8059MB temp=51.2C disk=81% [2026-03-14 17:02:01] [BEAT] [aria] load=0.04 mem=6977/8059MB temp=51.2C disk=81%
[2026-03-14 17:03:01] [BEAT] [aria] load=0.19 mem=6976/8059MB temp=51.2C disk=81% [2026-03-14 17:03:01] [BEAT] [aria] load=0.19 mem=6976/8059MB temp=51.2C disk=81%
[2026-03-14 17:04:01] [BEAT] [aria] load=0.11 mem=6973/8059MB temp=51.2C disk=81% [2026-03-14 17:04:01] [BEAT] [aria] load=0.11 mem=6973/8059MB temp=51.2C disk=81%
@@ -48,3 +5,46 @@
[2026-03-14 17:06:01] [BEAT] [aria] load=0.15 mem=6977/8059MB temp=52.4C disk=81% [2026-03-14 17:06:01] [BEAT] [aria] load=0.15 mem=6977/8059MB temp=52.4C disk=81%
[2026-03-14 17:06:15] [BEAT] [aria] load=0.13 mem=6975/8059MB temp=51.8C disk=81% [2026-03-14 17:06:15] [BEAT] [aria] load=0.13 mem=6975/8059MB temp=51.8C disk=81%
[2026-03-14 17:06:15] [BEAT] [aria] load=0.13 mem=6973/8059MB temp=52.4C disk=81% [2026-03-14 17:06:15] [BEAT] [aria] load=0.13 mem=6973/8059MB temp=52.4C disk=81%
[2026-03-14 17:07:01] [BEAT] [aria] load=0.11 mem=6978/8059MB temp=52.9C disk=81%
[2026-03-14 17:08:01] [BEAT] [aria] load=0.04 mem=6977/8059MB temp=51.2C disk=81%
[2026-03-14 17:09:01] [BEAT] [aria] load=0.07 mem=6976/8059MB temp=51.8C disk=81%
[2026-03-14 17:10:01] [FLEET] [aria] Starting cross-node health check
[2026-03-14 17:10:01] [BEAT] [aria] load=0.02 mem=6953/8059MB temp=52.4C disk=81%
[2026-03-14 17:10:02] [FLEET] [aria] alice: UP temp=36C mem=3295MB disk=87%
[2026-03-14 17:10:04] [FLEET] [aria] octavia: DOWN (no ping response)
[2026-03-14 17:10:04] [FLEET] [aria] cecilia: UP temp=38C mem=4867MB disk=19%
[2026-03-14 17:10:06] [FLEET] [aria] gematria: UP temp=C mem=4180MB disk=67%
[2026-03-14 17:10:07] [FLEET] [aria] lucidia: UP temp=63C mem=1279MB disk=32%
[2026-03-14 17:10:08] [FLEET] [aria] anastasia: UP temp=C mem=343MB disk=69%
[2026-03-14 17:11:02] [BEAT] [aria] load=0.10 mem=6975/8059MB temp=51.8C disk=81%
[2026-03-14 17:11:24] [BEAT] [aria] load=0.07 mem=6978/8059MB temp=51.8C disk=81%
[2026-03-14 17:11:24] [BEAT] [aria] load=0.07 mem=6975/8059MB temp=52.4C disk=81%
[2026-03-14 17:12:01] [BEAT] [aria] load=0.53 mem=6975/8059MB temp=51.2C disk=81%
[2026-03-14 17:13:01] [BEAT] [aria] load=1.16 mem=6976/8059MB temp=52.9C disk=81%
[2026-03-14 17:14:01] [BEAT] [aria] load=0.50 mem=6971/8059MB temp=51.8C disk=81%
[2026-03-14 17:15:01] [BEAT] [aria] load=0.18 mem=6949/8059MB temp=52.9C disk=81%
[2026-03-14 17:15:46] [DIAL] [aria] Switchboard unreachable
[2026-03-14 17:16:01] [BEAT] [aria] load=0.24 mem=6969/8059MB temp=52.9C disk=81%
[2026-03-14 17:16:38] [BEAT] [aria] load=0.18 mem=6965/8059MB temp=52.4C disk=81%
[2026-03-14 17:16:38] [BEAT] [aria] load=0.18 mem=6963/8059MB temp=52.4C disk=81%
[2026-03-14 17:17:01] [BEAT] [aria] load=0.13 mem=6966/8059MB temp=53.5C disk=81%
[2026-03-14 17:18:01] [BEAT] [aria] load=0.04 mem=6965/8059MB temp=51.8C disk=81%
[2026-03-14 17:19:01] [BEAT] [aria] load=0.01 mem=6970/8059MB temp=51.8C disk=81%
[2026-03-14 17:20:01] [FLEET] [aria] Starting cross-node health check
[2026-03-14 17:20:01] [BEAT] [aria] load=0.04 mem=6934/8059MB temp=52.4C disk=81%
[2026-03-14 17:20:02] [FLEET] [aria] alice: UP temp=35C mem=3283MB disk=87%
[2026-03-14 17:20:04] [FLEET] [aria] octavia: DOWN (no ping response)
[2026-03-14 17:20:04] [FLEET] [aria] cecilia: UP temp=39C mem=4935MB disk=19%
[2026-03-14 17:20:06] [FLEET] [aria] gematria: UP temp=C mem=4177MB disk=67%
[2026-03-14 17:20:07] [FLEET] [aria] lucidia: UP temp=62C mem=1319MB disk=32%
[2026-03-14 17:20:07] [FLEET] [aria] anastasia: UP temp=C mem=371MB disk=69%
[2026-03-14 17:21:01] [BEAT] [aria] load=0.08 mem=6959/8059MB temp=52.4C disk=81%
[2026-03-14 17:21:47] [BEAT] [aria] load=0.07 mem=6965/8059MB temp=52.9C disk=81%
[2026-03-14 17:21:47] [BEAT] [aria] load=0.07 mem=6962/8059MB temp=52.4C disk=81%
[2026-03-14 17:22:01] [BEAT] [aria] load=0.06 mem=6966/8059MB temp=52.4C disk=81%
[2026-03-14 17:23:02] [BEAT] [aria] load=0.17 mem=6970/8059MB temp=52.4C disk=81%
[2026-03-14 17:24:01] [BEAT] [aria] load=0.17 mem=6968/8059MB temp=51.8C disk=81%
[2026-03-14 17:25:01] [BEAT] [aria] load=0.12 mem=6936/8059MB temp=53.5C disk=81%
[2026-03-14 17:26:01] [BEAT] [aria] load=0.04 mem=6965/8059MB temp=54.0C disk=81%
[2026-03-14 17:26:56] [BEAT] [aria] load=0.16 mem=6948/8059MB temp=52.9C disk=81%
[2026-03-14 17:26:56] [BEAT] [aria] load=0.16 mem=6946/8059MB temp=54.0C disk=81%

View File

@@ -1 +1 @@
{"node":"aria","ts":"2026-03-14T22:06:15Z","load":0.13,"mem_free_mb":6973,"mem_total_mb":8059,"temp_c":52.4,"disk_pct":81,"throttle":"N/A"} {"node":"aria","ts":"2026-03-14T22:26:56Z","load":0.16,"mem_free_mb":6946,"mem_total_mb":8059,"temp_c":54.0,"disk_pct":81,"throttle":"N/A"}

View File

@@ -1,19 +1,19 @@
{ {
"hostname": "aria", "hostname": "aria",
"ts": "2026-03-14T22:06:16Z", "ts": "2026-03-14T22:26:57Z",
"uptime_seconds": 58952, "uptime_seconds": 60193,
"kernel": "6.12.62+rpt-rpi-2712", "kernel": "6.12.62+rpt-rpi-2712",
"temp_c": 51.8, "temp_c": 52.4,
"memory_mb": { "memory_mb": {
"total": 8059, "total": 8059,
"used": 1090, "used": 1119,
"free": 6968 "free": 6939
}, },
"disk": "22G/29G (81%)", "disk": "22G/29G (81%)",
"load": [ "load": [
0.16,
0.12, 0.12,
0.13, 0.13
0.14
], ],
"ollama_models": [ "ollama_models": [
"nomic-embed-text:latest", "nomic-embed-text:latest",

View File

@@ -1,50 +1,50 @@
[2026-03-14 16:49:01] [BEAT] [cecilia] load=5.03 mem=4746/8058MB temp=50.1C disk=19% [2026-03-14 17:09:01] [BEAT] [cecilia] load=0.62 mem=4896/8058MB temp=37.5C disk=19%
[2026-03-14 16:50:02] [FLEET] [cecilia] Starting cross-node health check [2026-03-14 17:10:01] [FLEET] [cecilia] Starting cross-node health check
[2026-03-14 16:50:02] [BEAT] [cecilia] load=4.52 mem=4704/8058MB temp=45.8C disk=19% [2026-03-14 17:10:01] [BEAT] [cecilia] load=1.34 mem=4877/8058MB temp=38.0C disk=19%
[2026-03-14 16:50:02] [HEAL] [cecilia] OOM kills detected (1) — clearing caches [2026-03-14 17:10:01] [HEAL] [cecilia] OOM kills detected (1) — clearing caches
[2026-03-14 16:50:03] [HEAL] [cecilia] High swap: 956MB — clearing inactive [2026-03-14 17:10:02] [HEAL] [cecilia] High swap: 942MB — clearing inactive
[2026-03-14 16:50:03] [FLEET] [cecilia] alice: UP temp=35C mem=3299MB disk=87% [2026-03-14 17:10:02] [FLEET] [cecilia] alice: UP temp=36C mem=3281MB disk=87%
[2026-03-14 16:50:05] [FLEET] [cecilia] octavia: DOWN (no ping response) [2026-03-14 17:10:04] [FLEET] [cecilia] octavia: DOWN (no ping response)
[2026-03-14 16:50:07] [FLEET] [cecilia] gematria: UP temp=C mem=4188MB disk=67% [2026-03-14 17:10:06] [FLEET] [cecilia] gematria: UP temp=C mem=4180MB disk=67%
[2026-03-14 16:50:07] [FLEET] [cecilia] lucidia: UP temp=56C mem=1179MB disk=32% [2026-03-14 17:10:06] [FLEET] [cecilia] lucidia: UP temp=63C mem=1278MB disk=32%
[2026-03-14 16:50:07] [FLEET] [cecilia] aria: UP temp=55C mem=6937MB disk=81% [2026-03-14 17:10:07] [FLEET] [cecilia] aria: UP temp=52C mem=6924MB disk=81%
[2026-03-14 16:50:08] [FLEET] [cecilia] anastasia: UP temp=C mem=344MB disk=69% [2026-03-14 17:10:08] [FLEET] [cecilia] anastasia: UP temp=C mem=343MB disk=69%
[2026-03-14 16:50:47] [BEAT] [cecilia] load=2.38 mem=4887/8058MB temp=44.6C disk=19% [2026-03-14 17:11:01] [BEAT] [cecilia] load=1.15 mem=4894/8058MB temp=38.0C disk=19%
[2026-03-14 16:50:47] [BEAT] [cecilia] load=2.38 mem=4883/8058MB temp=43.5C disk=19% [2026-03-14 17:11:24] [BEAT] [cecilia] load=0.82 mem=4887/8058MB temp=38.6C disk=19%
[2026-03-14 16:51:01] [BEAT] [cecilia] load=1.92 mem=4884/8058MB temp=44.6C disk=19% [2026-03-14 17:11:24] [BEAT] [cecilia] load=0.82 mem=4888/8058MB temp=38.0C disk=19%
[2026-03-14 16:52:01] [BEAT] [cecilia] load=0.92 mem=4894/8058MB temp=43.0C disk=19% [2026-03-14 17:12:01] [BEAT] [cecilia] load=0.53 mem=4886/8058MB temp=38.6C disk=19%
[2026-03-14 16:53:01] [BEAT] [cecilia] load=0.58 mem=4888/8058MB temp=41.9C disk=19% [2026-03-14 17:13:01] [BEAT] [cecilia] load=0.28 mem=4885/8058MB temp=37.5C disk=19%
[2026-03-14 16:54:01] [BEAT] [cecilia] load=0.55 mem=4902/8058MB temp=41.4C disk=19% [2026-03-14 17:14:01] [BEAT] [cecilia] load=0.30 mem=4883/8058MB temp=37.5C disk=19%
[2026-03-14 16:55:02] [BEAT] [cecilia] load=0.37 mem=4863/8058MB temp=41.9C disk=19% [2026-03-14 17:15:01] [BEAT] [cecilia] load=0.23 mem=4811/8058MB temp=38.6C disk=19%
[2026-03-14 16:55:02] [HEAL] [cecilia] OOM kills detected (1) — clearing caches [2026-03-14 17:15:01] [HEAL] [cecilia] OOM kills detected (1) — clearing caches
[2026-03-14 16:55:02] [HEAL] [cecilia] High swap: 956MB — clearing inactive [2026-03-14 17:15:01] [HEAL] [cecilia] High swap: 941MB — clearing inactive
[2026-03-14 16:55:56] [BEAT] [cecilia] load=0.39 mem=4916/8058MB temp=40.2C disk=19% [2026-03-14 17:15:46] [DIAL] [cecilia] Switchboard unreachable
[2026-03-14 16:55:56] [BEAT] [cecilia] load=0.39 mem=4915/8058MB temp=41.4C disk=19% [2026-03-14 17:15:54] [DIAL] [cecilia] Switchboard unreachable
[2026-03-14 16:56:01] [BEAT] [cecilia] load=0.43 mem=4910/8058MB temp=39.7C disk=19% [2026-03-14 17:16:01] [BEAT] [cecilia] load=0.18 mem=4766/8058MB temp=38.0C disk=19%
[2026-03-14 16:57:01] [BEAT] [cecilia] load=0.45 mem=4911/8058MB temp=39.7C disk=19% [2026-03-14 17:16:38] [BEAT] [cecilia] load=0.23 mem=4774/8058MB temp=38.0C disk=19%
[2026-03-14 16:58:01] [BEAT] [cecilia] load=0.35 mem=4911/8058MB temp=39.1C disk=19% [2026-03-14 17:16:38] [BEAT] [cecilia] load=0.23 mem=4773/8058MB temp=37.5C disk=19%
[2026-03-14 16:59:01] [BEAT] [cecilia] load=0.38 mem=4906/8058MB temp=39.1C disk=19% [2026-03-14 17:17:01] [BEAT] [cecilia] load=0.39 mem=4777/8058MB temp=38.0C disk=19%
[2026-03-14 17:00:01] [FLEET] [cecilia] Starting cross-node health check [2026-03-14 17:18:01] [BEAT] [cecilia] load=0.37 mem=4793/8058MB temp=37.5C disk=19%
[2026-03-14 17:00:01] [BEAT] [cecilia] load=0.17 mem=4867/8058MB temp=39.7C disk=19% [2026-03-14 17:19:01] [BEAT] [cecilia] load=0.26 mem=4797/8058MB temp=37.5C disk=19%
[2026-03-14 17:00:01] [HEAL] [cecilia] OOM kills detected (1) — clearing caches [2026-03-14 17:20:01] [FLEET] [cecilia] Starting cross-node health check
[2026-03-14 17:00:02] [HEAL] [cecilia] High swap: 943MB — clearing inactive [2026-03-14 17:20:01] [BEAT] [cecilia] load=0.22 mem=4773/8058MB temp=38.6C disk=19%
[2026-03-14 17:00:02] [FLEET] [cecilia] alice: UP temp=36C mem=3294MB disk=87% [2026-03-14 17:20:02] [HEAL] [cecilia] OOM kills detected (1) — clearing caches
[2026-03-14 17:00:04] [FLEET] [cecilia] octavia: DOWN (no ping response) [2026-03-14 17:20:02] [HEAL] [cecilia] High swap: 939MB — clearing inactive
[2026-03-14 17:00:06] [FLEET] [cecilia] gematria: UP temp=C mem=4176MB disk=67% [2026-03-14 17:20:02] [FLEET] [cecilia] alice: UP temp=36C mem=3285MB disk=87%
[2026-03-14 17:00:08] [FLEET] [cecilia] lucidia: UP temp=56C mem=1201MB disk=32% [2026-03-14 17:20:04] [FLEET] [cecilia] octavia: DOWN (no ping response)
[2026-03-14 17:00:08] [FLEET] [cecilia] aria: UP temp=52C mem=6929MB disk=81% [2026-03-14 17:20:06] [FLEET] [cecilia] gematria: UP temp=C mem=4177MB disk=67%
[2026-03-14 17:00:09] [FLEET] [cecilia] anastasia: UP temp=C mem=345MB disk=69% [2026-03-14 17:20:07] [FLEET] [cecilia] lucidia: UP temp=63C mem=1317MB disk=32%
[2026-03-14 17:00:46] [DIAL] [cecilia] Switchboard unreachable [2026-03-14 17:20:07] [FLEET] [cecilia] aria: UP temp=54C mem=6925MB disk=81%
[2026-03-14 17:00:54] [DIAL] [cecilia] Switchboard unreachable [2026-03-14 17:20:08] [FLEET] [cecilia] anastasia: UP temp=C mem=371MB disk=69%
[2026-03-14 17:01:01] [BEAT] [cecilia] load=0.15 mem=4830/8058MB temp=39.7C disk=19% [2026-03-14 17:21:01] [BEAT] [cecilia] load=0.54 mem=4971/8058MB temp=37.5C disk=19%
[2026-03-14 17:01:06] [BEAT] [cecilia] load=0.14 mem=4824/8058MB temp=38.6C disk=19% [2026-03-14 17:21:47] [BEAT] [cecilia] load=0.30 mem=4963/8058MB temp=37.5C disk=19%
[2026-03-14 17:01:06] [BEAT] [cecilia] load=0.14 mem=4822/8058MB temp=39.1C disk=19% [2026-03-14 17:21:47] [BEAT] [cecilia] load=0.30 mem=4963/8058MB temp=38.0C disk=19%
[2026-03-14 17:02:01] [BEAT] [cecilia] load=0.24 mem=4836/8058MB temp=38.6C disk=19% [2026-03-14 17:22:01] [BEAT] [cecilia] load=0.37 mem=4966/8058MB temp=38.6C disk=19%
[2026-03-14 17:03:02] [BEAT] [cecilia] load=0.17 mem=4836/8058MB temp=38.6C disk=19% [2026-03-14 17:23:01] [BEAT] [cecilia] load=0.60 mem=4964/8058MB temp=37.0C disk=19%
[2026-03-14 17:04:02] [BEAT] [cecilia] load=0.26 mem=4831/8058MB temp=38.0C disk=19% [2026-03-14 17:24:01] [BEAT] [cecilia] load=0.34 mem=4959/8058MB temp=38.0C disk=19%
[2026-03-14 17:05:01] [BEAT] [cecilia] load=0.19 mem=4798/8058MB temp=39.1C disk=19% [2026-03-14 17:25:01] [BEAT] [cecilia] load=0.18 mem=4953/8058MB temp=37.5C disk=19%
[2026-03-14 17:05:01] [HEAL] [cecilia] OOM kills detected (1) — clearing caches [2026-03-14 17:25:01] [HEAL] [cecilia] OOM kills detected (1) — clearing caches
[2026-03-14 17:05:01] [HEAL] [cecilia] High swap: 943MB — clearing inactive [2026-03-14 17:25:02] [HEAL] [cecilia] High swap: 939MB — clearing inactive
[2026-03-14 17:06:01] [BEAT] [cecilia] load=0.38 mem=4910/8058MB temp=38.0C disk=19% [2026-03-14 17:26:01] [BEAT] [cecilia] load=0.26 mem=4952/8058MB temp=37.0C disk=19%
[2026-03-14 17:06:15] [BEAT] [cecilia] load=0.30 mem=4905/8058MB temp=37.0C disk=19% [2026-03-14 17:26:56] [BEAT] [cecilia] load=0.34 mem=4951/8058MB temp=38.0C disk=19%
[2026-03-14 17:06:15] [BEAT] [cecilia] load=0.30 mem=4904/8058MB temp=37.5C disk=19% [2026-03-14 17:26:56] [BEAT] [cecilia] load=0.34 mem=4947/8058MB temp=38.6C disk=19%

View File

@@ -1 +1 @@
{"node":"cecilia","ts":"2026-03-14T22:06:15Z","load":0.30,"mem_free_mb":4904,"mem_total_mb":8058,"temp_c":37.5,"disk_pct":19,"throttle":"N/A"} {"node":"cecilia","ts":"2026-03-14T22:26:56Z","load":0.34,"mem_free_mb":4947,"mem_total_mb":8058,"temp_c":38.6,"disk_pct":19,"throttle":"N/A"}

View File

@@ -1,19 +1,19 @@
{ {
"hostname": "cecilia", "hostname": "cecilia",
"ts": "2026-03-14T22:06:16Z", "ts": "2026-03-14T22:26:58Z",
"uptime_seconds": 73451, "uptime_seconds": 74693,
"kernel": "6.12.62+rpt-rpi-2712", "kernel": "6.12.62+rpt-rpi-2712",
"temp_c": 38.0, "temp_c": 37.5,
"memory_mb": { "memory_mb": {
"total": 8058, "total": 8058,
"used": 3161, "used": 3118,
"free": 4897 "free": 4940
}, },
"disk": "79G/457G (19%)", "disk": "79G/457G (19%)",
"load": [ "load": [
0.3, 0.34,
0.38, 0.35,
1.64 0.72
], ],
"ollama_models": [ "ollama_models": [
"deepseek-r1:1.5b", "deepseek-r1:1.5b",
@@ -34,5 +34,5 @@
], ],
"throttle": "N/A", "throttle": "N/A",
"voltage": "N/A", "voltage": "N/A",
"services_running": 39 "services_running": 40
} }

View File

@@ -1,18 +1,18 @@
{ {
"hostname": "gematria", "hostname": "gematria",
"ts": "2026-03-14T22:06:19Z", "ts": "2026-03-14T22:27:00Z",
"uptime_seconds": 5325173, "uptime_seconds": 5326414,
"kernel": "5.15.0-113-generic", "kernel": "5.15.0-113-generic",
"temp_c": 0, "temp_c": 0,
"memory_mb": { "memory_mb": {
"total": 7937, "total": 7937,
"used": 3307, "used": 3311,
"free": 4181 "free": 4178
}, },
"disk": "52G/78G (67%)", "disk": "52G/78G (67%)",
"load": [ "load": [
3.09, 3.04,
3.09, 3.08,
3.08 3.08
], ],
"ollama_models": [ "ollama_models": [

View File

@@ -1,50 +1,50 @@
[2026-03-14 16:45:47] [DIAL] [lucidia] Switchboard unreachable
[2026-03-14 16:46:01] [BEAT] [lucidia] load=3.23 mem=1214/8059MB temp=62.8C disk=32%
[2026-03-14 16:47:01] [BEAT] [lucidia] load=2.66 mem=1187/8059MB temp=56.8C disk=32%
[2026-03-14 16:48:01] [BEAT] [lucidia] load=3.91 mem=1170/8059MB temp=55.6C disk=32%
[2026-03-14 16:49:01] [BEAT] [lucidia] load=2.48 mem=1184/8059MB temp=61.7C disk=32%
[2026-03-14 16:50:01] [FLEET] [lucidia] Starting cross-node health check
[2026-03-14 16:50:01] [BEAT] [lucidia] load=2.21 mem=1222/8059MB temp=56.8C disk=32%
[2026-03-14 16:50:01] [HEAL] [lucidia] High swap: 3712MB — clearing inactive
[2026-03-14 16:50:02] [FLEET] [lucidia] alice: UP temp=37C mem=3294MB disk=87%
[2026-03-14 16:50:04] [FLEET] [lucidia] octavia: DOWN (no ping response)
[2026-03-14 16:50:04] [FLEET] [lucidia] cecilia: UP temp=45C mem=4834MB disk=19%
[2026-03-14 16:50:07] [FLEET] [lucidia] gematria: UP temp=C mem=4188MB disk=67%
[2026-03-14 16:50:07] [FLEET] [lucidia] aria: UP temp=53C mem=6938MB disk=81%
[2026-03-14 16:50:08] [FLEET] [lucidia] anastasia: UP temp=C mem=344MB disk=69%
[2026-03-14 16:50:47] [BEAT] [lucidia] load=3.14 mem=1199/8059MB temp=63.4C disk=32%
[2026-03-14 16:50:47] [BEAT] [lucidia] load=3.14 mem=1197/8059MB temp=63.9C disk=32%
[2026-03-14 16:51:01] [BEAT] [lucidia] load=3.28 mem=1221/8059MB temp=65.0C disk=32%
[2026-03-14 16:52:01] [BEAT] [lucidia] load=2.86 mem=1223/8059MB temp=56.2C disk=32%
[2026-03-14 16:53:01] [BEAT] [lucidia] load=2.77 mem=1188/8059MB temp=56.8C disk=32%
[2026-03-14 16:54:01] [BEAT] [lucidia] load=2.18 mem=1222/8059MB temp=60.6C disk=32%
[2026-03-14 16:55:01] [BEAT] [lucidia] load=2.29 mem=1191/8059MB temp=52.9C disk=32%
[2026-03-14 16:55:01] [HEAL] [lucidia] High swap: 3710MB — clearing inactive
[2026-03-14 16:55:56] [BEAT] [lucidia] load=3.49 mem=1263/8059MB temp=61.1C disk=32%
[2026-03-14 16:55:56] [BEAT] [lucidia] load=3.49 mem=1262/8059MB temp=60.6C disk=32%
[2026-03-14 16:56:01] [BEAT] [lucidia] load=3.85 mem=1281/8059MB temp=61.1C disk=32%
[2026-03-14 16:57:01] [BEAT] [lucidia] load=2.62 mem=1311/8059MB temp=52.4C disk=32%
[2026-03-14 16:58:01] [BEAT] [lucidia] load=2.08 mem=1286/8059MB temp=52.4C disk=32%
[2026-03-14 16:58:52] [DIAL] [lucidia] Switchboard unreachable
[2026-03-14 16:59:01] [BEAT] [lucidia] load=2.77 mem=1292/8059MB temp=58.4C disk=32%
[2026-03-14 17:00:01] [FLEET] [lucidia] Starting cross-node health check
[2026-03-14 17:00:01] [BEAT] [lucidia] load=2.78 mem=1317/8059MB temp=51.8C disk=32%
[2026-03-14 17:00:01] [HEAL] [lucidia] High swap: 3710MB — clearing inactive
[2026-03-14 17:00:02] [FLEET] [lucidia] alice: UP temp=35C mem=3279MB disk=87%
[2026-03-14 17:00:04] [FLEET] [lucidia] octavia: DOWN (no ping response)
[2026-03-14 17:00:05] [FLEET] [lucidia] cecilia: UP temp=40C mem=4753MB disk=19%
[2026-03-14 17:00:07] [FLEET] [lucidia] gematria: UP temp=C mem=4176MB disk=67%
[2026-03-14 17:00:07] [FLEET] [lucidia] aria: UP temp=52C mem=6930MB disk=81%
[2026-03-14 17:00:08] [FLEET] [lucidia] anastasia: UP temp=C mem=345MB disk=69%
[2026-03-14 17:00:46] [DIAL] [lucidia] Switchboard unreachable
[2026-03-14 17:01:01] [BEAT] [lucidia] load=3.42 mem=1230/8059MB temp=50.7C disk=32%
[2026-03-14 17:01:06] [BEAT] [lucidia] load=3.15 mem=1235/8059MB temp=52.4C disk=32%
[2026-03-14 17:01:07] [BEAT] [lucidia] load=3.15 mem=1235/8059MB temp=51.8C disk=32%
[2026-03-14 17:02:01] [BEAT] [lucidia] load=4.29 mem=1174/8059MB temp=54.0C disk=32%
[2026-03-14 17:03:01] [BEAT] [lucidia] load=1.84 mem=1267/8059MB temp=56.8C disk=32%
[2026-03-14 17:04:01] [BEAT] [lucidia] load=2.31 mem=1207/8059MB temp=52.4C disk=32%
[2026-03-14 17:05:02] [BEAT] [lucidia] load=2.00 mem=1242/8059MB temp=57.9C disk=32%
[2026-03-14 17:05:02] [HEAL] [lucidia] High swap: 3710MB — clearing inactive
[2026-03-14 17:06:01] [BEAT] [lucidia] load=1.66 mem=1215/8059MB temp=50.7C disk=32%
[2026-03-14 17:06:15] [BEAT] [lucidia] load=2.71 mem=1235/8059MB temp=58.4C disk=32% [2026-03-14 17:06:15] [BEAT] [lucidia] load=2.71 mem=1235/8059MB temp=58.4C disk=32%
[2026-03-14 17:06:15] [BEAT] [lucidia] load=2.71 mem=1244/8059MB temp=57.9C disk=32% [2026-03-14 17:06:15] [BEAT] [lucidia] load=2.71 mem=1244/8059MB temp=57.9C disk=32%
[2026-03-14 17:07:01] [BEAT] [lucidia] load=3.20 mem=1196/8059MB temp=55.6C disk=32%
[2026-03-14 17:08:01] [BEAT] [lucidia] load=2.15 mem=1186/8059MB temp=60.0C disk=32%
[2026-03-14 17:09:01] [BEAT] [lucidia] load=1.91 mem=1268/8059MB temp=52.9C disk=32%
[2026-03-14 17:10:01] [FLEET] [lucidia] Starting cross-node health check
[2026-03-14 17:10:01] [BEAT] [lucidia] load=3.50 mem=1259/8059MB temp=62.8C disk=32%
[2026-03-14 17:10:01] [HEAL] [lucidia] High swap: 3710MB — clearing inactive
[2026-03-14 17:10:02] [FLEET] [lucidia] alice: UP temp=36C mem=3298MB disk=87%
[2026-03-14 17:10:04] [FLEET] [lucidia] octavia: DOWN (no ping response)
[2026-03-14 17:10:05] [FLEET] [lucidia] cecilia: UP temp=39C mem=4868MB disk=19%
[2026-03-14 17:10:06] [FLEET] [lucidia] gematria: UP temp=C mem=4180MB disk=67%
[2026-03-14 17:10:07] [FLEET] [lucidia] aria: UP temp=53C mem=6924MB disk=81%
[2026-03-14 17:10:08] [FLEET] [lucidia] anastasia: UP temp=C mem=343MB disk=69%
[2026-03-14 17:11:32] [BEAT] [lucidia] load=19.03 mem=1333/8059MB temp=51.2C disk=32%
[2026-03-14 17:11:35] [BEAT] [lucidia] load=19.91 mem=1297/8059MB temp=58.4C disk=32%
[2026-03-14 17:11:35] [BEAT] [lucidia] load=19.91 mem=1297/8059MB temp=57.9C disk=32%
[2026-03-14 17:12:01] [BEAT] [lucidia] load=15.53 mem=1358/8059MB temp=62.2C disk=32%
[2026-03-14 17:13:01] [BEAT] [lucidia] load=6.60 mem=1373/8059MB temp=52.9C disk=32%
[2026-03-14 17:13:53] [DIAL] [lucidia] Switchboard unreachable
[2026-03-14 17:14:01] [BEAT] [lucidia] load=4.00 mem=1267/8059MB temp=55.1C disk=32%
[2026-03-14 17:15:01] [BEAT] [lucidia] load=3.42 mem=1310/8059MB temp=62.8C disk=32%
[2026-03-14 17:15:01] [HEAL] [lucidia] High swap: 3709MB — clearing inactive
[2026-03-14 17:15:47] [DIAL] [lucidia] Switchboard unreachable
[2026-03-14 17:16:33] [BEAT] [lucidia] load=14.58 mem=1338/8059MB temp=55.1C disk=32%
[2026-03-14 17:16:38] [BEAT] [lucidia] load=14.30 mem=1333/8059MB temp=59.0C disk=32%
[2026-03-14 17:16:38] [BEAT] [lucidia] load=14.30 mem=1333/8059MB temp=59.0C disk=32%
[2026-03-14 17:17:01] [BEAT] [lucidia] load=11.60 mem=1392/8059MB temp=63.9C disk=32%
[2026-03-14 17:18:01] [BEAT] [lucidia] load=5.46 mem=1284/8059MB temp=53.5C disk=32%
[2026-03-14 17:19:01] [BEAT] [lucidia] load=3.57 mem=1339/8059MB temp=55.6C disk=32%
[2026-03-14 17:20:01] [FLEET] [lucidia] Starting cross-node health check
[2026-03-14 17:20:01] [BEAT] [lucidia] load=4.03 mem=1319/8059MB temp=61.7C disk=32%
[2026-03-14 17:20:01] [HEAL] [lucidia] High swap: 3709MB — clearing inactive
[2026-03-14 17:20:03] [FLEET] [lucidia] alice: UP temp=36C mem=3285MB disk=87%
[2026-03-14 17:20:05] [FLEET] [lucidia] octavia: DOWN (no ping response)
[2026-03-14 17:20:06] [FLEET] [lucidia] cecilia: UP temp=38C mem=4933MB disk=19%
[2026-03-14 17:20:07] [FLEET] [lucidia] gematria: UP temp=C mem=4184MB disk=67%
[2026-03-14 17:20:07] [FLEET] [lucidia] aria: UP temp=54C mem=6922MB disk=81%
[2026-03-14 17:20:08] [FLEET] [lucidia] anastasia: UP temp=C mem=371MB disk=69%
[2026-03-14 17:21:01] [BEAT] [lucidia] load=4.51 mem=1254/8059MB temp=56.8C disk=32%
[2026-03-14 17:21:47] [BEAT] [lucidia] load=3.04 mem=1343/8059MB temp=61.7C disk=32%
[2026-03-14 17:21:47] [BEAT] [lucidia] load=3.04 mem=1343/8059MB temp=61.7C disk=32%
[2026-03-14 17:22:01] [BEAT] [lucidia] load=3.85 mem=1303/8059MB temp=63.4C disk=32%
[2026-03-14 17:23:01] [BEAT] [lucidia] load=2.41 mem=1313/8059MB temp=54.5C disk=32%
[2026-03-14 17:24:01] [BEAT] [lucidia] load=2.09 mem=1281/8059MB temp=57.3C disk=32%
[2026-03-14 17:25:02] [BEAT] [lucidia] load=2.58 mem=1351/8059MB temp=63.4C disk=32%
[2026-03-14 17:25:02] [HEAL] [lucidia] High swap: 3709MB — clearing inactive
[2026-03-14 17:26:33] [BEAT] [lucidia] load=18.98 mem=1314/8059MB temp=55.1C disk=32%
[2026-03-14 17:26:56] [BEAT] [lucidia] load=15.05 mem=1272/8059MB temp=63.9C disk=32%
[2026-03-14 17:26:56] [BEAT] [lucidia] load=15.05 mem=1272/8059MB temp=63.9C disk=32%

View File

@@ -1 +1 @@
{"node":"lucidia","ts":"2026-03-14T22:06:15Z","load":2.71,"mem_free_mb":1244,"mem_total_mb":8059,"temp_c":57.9,"disk_pct":32,"throttle":"N/A"} {"node":"lucidia","ts":"2026-03-14T22:26:56Z","load":15.05,"mem_free_mb":1272,"mem_total_mb":8059,"temp_c":63.9,"disk_pct":32,"throttle":"N/A"}

View File

@@ -1,19 +1,19 @@
{ {
"hostname": "octavia", "hostname": "octavia",
"ts": "2026-03-14T22:06:16Z", "ts": "2026-03-14T22:26:57Z",
"uptime_seconds": 143053, "uptime_seconds": 144294,
"kernel": "6.12.62+rpt-rpi-2712", "kernel": "6.12.62+rpt-rpi-2712",
"temp_c": 57.3, "temp_c": 61.7,
"memory_mb": { "memory_mb": {
"total": 8059, "total": 8059,
"used": 6813, "used": 6787,
"free": 1245 "free": 1271
}, },
"disk": "72G/235G (32%)", "disk": "72G/235G (32%)",
"load": [ "load": [
2.71, 15.05,
2.66, 7.52,
3.46 5.57
], ],
"ollama_models": [ "ollama_models": [
"qwen2.5:3b", "qwen2.5:3b",

View File

@@ -1 +1 @@
{"node":"octavia","status":"down","ts":"2026-03-14T22:06:16Z"} {"node":"octavia","status":"down","ts":"2026-03-14T22:26:57Z"}

View File

@@ -0,0 +1 @@
blackroad-wake-words.sh

View File

@@ -1,17 +0,0 @@
#!/usr/bin/env bash
# Generate a site file from the `blackroad-web` ollama model.
# The model's first output line is the relative file path; the rest is content.
set -euo pipefail

OUTDIR="$HOME/blackroad-sites"
TMP="$(mktemp)"
# Always remove the temp file, even on error (it previously leaked).
trap 'rm -f -- "$TMP"' EXIT

ollama run blackroad-web > "$TMP"

# Expect first line to be a file path, remaining lines the file body.
FILE="$(head -n 1 "$TMP")"
CONTENT="$(tail -n +2 "$TMP")"

# Refuse empty, absolute, or parent-escaping paths so we never write
# outside $OUTDIR — the model output is untrusted.
case "$FILE" in
  ''|/*|..|../*|*/../*|*/..)
    printf '✗ unsafe or empty path from model: %s\n' "$FILE" >&2
    exit 1
    ;;
esac

FULL="$OUTDIR/$FILE"
mkdir -p "$(dirname "$FULL")"
# printf instead of echo: content may start with '-' or contain backslashes.
printf '%s\n' "$CONTENT" > "$FULL"
echo "✔ wrote $FULL"

1
scripts/br-web.sh Symbolic link
View File

@@ -0,0 +1 @@
/Users/alexa/bin/br-web

87
sync.sh Executable file
View File

@@ -0,0 +1,87 @@
#!/bin/bash
# BlackRoad Monorepo Sync — keeps the monorepo current with all sources
# Usage: ./sync.sh [push]
# Run without args to sync locally, with "push" to also push to Gitea.
# Requires: rsync, git; for push, a token in ~/.blackroad-gitea-token
set -euo pipefail

MONO="$HOME/blackroad-monorepo"
PINK='\033[38;5;205m'
GREEN='\033[38;5;82m'
BLUE='\033[38;5;69m'
RESET='\033[0m'

echo -e "${PINK}🛣️  BlackRoad Monorepo Sync${RESET}"
echo ""

# Mirror one top-level tree from ~/local into the monorepo (deletes strays).
sync_dir() {
  local name=$1
  echo -e "${BLUE}Syncing ${name}/...${RESET}"
  rsync -a --delete "$HOME/local/$name/" "$MONO/$name/"
}

# bin/ keeps large vendored binaries out of the repo.
echo -e "${BLUE}Syncing bin/...${RESET}"
rsync -a --delete --exclude='rclone' --exclude='cloudflared' --exclude='nsc' \
  "$HOME/local/bin/" "$MONO/bin/"
for d in scripts fleet roadc roadnet config dotfiles memory; do
  sync_dir "$d"
done

# Sync worker sources
echo -e "${BLUE}Syncing workers/...${RESET}"
mkdir -p "$MONO/workers"
for worker_dir in roadcode-squad squad-webhook tollbooth road-search stats-blackroad blackroad-stripe; do
  src="$HOME/$worker_dir"
  if [ -d "$src/src" ]; then
    rsync -a "$src/src/" "$MONO/workers/${worker_dir}-src/"
    [ -f "$src/wrangler.toml" ] && cp "$src/wrangler.toml" "$MONO/workers/${worker_dir}.toml"
  elif [ -f "$src/collect.sh" ]; then
    cp "$src/collect.sh" "$MONO/workers/stats-collect.sh"
  fi
done

# Sync operator memory scripts
echo -e "${BLUE}Syncing operator/...${RESET}"
mkdir -p "$MONO/operator"
rsync -a "$HOME/blackroad-operator/scripts/memory/" "$MONO/operator/memory/"

# Sync docs (optional checkout)
if [ -d "$HOME/docs-blackroad-io" ]; then
  echo -e "${BLUE}Syncing docs/...${RESET}"
  rsync -a --exclude='.git' "$HOME/docs-blackroad-io/" "$MONO/docs/"
fi

# Commit if there are changes
cd "$MONO"
changes=$(git status --porcelain | wc -l | tr -d ' ')
if [ "$changes" -gt 0 ]; then
  git add -A
  # "|| true": an empty diffstat must not abort the script under pipefail.
  file_count=$(git diff --cached --stat | tail -1 | grep -oE '[0-9]+ file' | grep -oE '[0-9]+' || true)
  # " — " separator: message reads "sync: 2026-03-14 17:28 — 45 files from Alexandria"
  git commit -m "sync: $(date '+%Y-%m-%d %H:%M') — ${file_count:-0} files from Alexandria"
  echo -e "${GREEN}✅ Committed ${file_count:-0} changed files${RESET}"
  if [ "${1:-}" = "push" ]; then
    echo -e "${BLUE}Pushing to Gitea...${RESET}"
    # SECURITY: the token must come from ~/.blackroad-gitea-token only.
    # The previous inline fallback token was committed to source — it is
    # leaked and must be revoked; never hardcode a replacement.
    token=$(cat "$HOME/.blackroad-gitea-token" 2>/dev/null) || {
      echo "✗ missing ~/.blackroad-gitea-token — cannot push" >&2
      exit 1
    }
    git -c http.extraHeader="Authorization: token $token" push gitea main 2>&1
    echo -e "${GREEN}✅ Pushed to git.blackroad.io${RESET}"
  fi
else
  echo -e "${GREEN}✅ Already up to date${RESET}"
fi
echo ""
echo -e "${PINK}BlackRoad OS — Pave Tomorrow.${RESET}"

View File

@@ -0,0 +1,295 @@
// BlackRoad Stripe Worker — checkout, portal, prices, webhooks, admin
const SECURITY_HEADERS = {
'X-Content-Type-Options': 'nosniff',
'X-Frame-Options': 'SAMEORIGIN',
'X-XSS-Protection': '1; mode=block',
'Referrer-Policy': 'strict-origin-when-cross-origin',
'Permissions-Policy': 'geolocation=(), microphone=(), camera=()',
'Strict-Transport-Security': 'max-age=31536000; includeSubDomains; preload',
};
function corsHeaders(origin) {
return {
'Access-Control-Allow-Origin': origin || '*',
'Access-Control-Allow-Methods': 'GET, POST, OPTIONS',
'Access-Control-Allow-Headers': 'Content-Type, Authorization',
'Access-Control-Max-Age': '86400',
};
}
// Minimal Stripe REST client: form-encodes `body`, authenticates with the
// bound secret key, parses JSON, and throws on any non-2xx response using
// Stripe's error message when available.
async function stripeRequest(env, method, path, body = null) {
  const options = {
    method,
    headers: {
      'Authorization': `Bearer ${env.STRIPE_SECRET_KEY}`,
      'Content-Type': 'application/x-www-form-urlencoded',
    },
  };
  if (body) options.body = new URLSearchParams(body).toString();
  const res = await fetch(`https://api.stripe.com/v1${path}`, options);
  const data = await res.json();
  if (res.ok) return data;
  throw new Error(data.error?.message || `Stripe error: ${res.status}`);
}
// ─── Checkout ─────────────────────────────────────────────────────────
// Create a Stripe Checkout session for a subscription price.
// Body: { price_id (required), success_url?, cancel_url?, customer_email?, metadata? }
// Responds with { url, session_id } or 400 when price_id is missing.
async function handleCheckout(request, env) {
  const body = await request.json();
  const { price_id, success_url, cancel_url, customer_email, metadata = {} } = body;
  if (!price_id) {
    return Response.json({ error: 'price_id is required' }, { status: 400 });
  }
  const origin = request.headers.get('Origin') || '*';
  const params = {
    mode: 'subscription',
    'line_items[0][price]': price_id,
    'line_items[0][quantity]': '1',
    success_url: success_url || `${origin}/billing?success=true&session_id={CHECKOUT_SESSION_ID}`,
    cancel_url: cancel_url || `${origin}/pricing`,
    'automatic_tax[enabled]': 'true',
    'subscription_data[metadata][source]': 'blackroad-os',
  };
  // Caller-supplied metadata lands on the checkout session itself.
  for (const [key, value] of Object.entries(metadata)) {
    params[`metadata[${key}]`] = value;
  }
  if (customer_email) {
    params.customer_email = customer_email;
  }
  const session = await stripeRequest(env, 'POST', '/checkout/sessions', params);
  return Response.json({ url: session.url, session_id: session.id });
}
// ─── Billing Portal ───────────────────────────────────────────────────
// Create a Stripe Billing Portal session so an existing customer can manage
// their subscription. Body: { customer_id (required), return_url? }.
async function handlePortal(request, env) {
  const { customer_id, return_url } = await request.json();
  if (!customer_id) {
    return Response.json({ error: 'customer_id is required' }, { status: 400 });
  }
  const fallbackReturn = `${request.headers.get('Origin') || '*'}/account`;
  const session = await stripeRequest(env, 'POST', '/billing_portal/sessions', {
    customer: customer_id,
    return_url: return_url || fallbackReturn,
  });
  return Response.json({ url: session.url });
}
// ─── Prices ───────────────────────────────────────────────────────────
// List active prices, cheapest first, with their (non-deleted) product
// details expanded inline. Responds with { prices, count }.
async function handlePrices(env) {
  const prices = await stripeRequest(env, 'GET', '/prices?active=true&expand[]=data.product&limit=100');
  const formatted = [];
  for (const p of prices.data) {
    if (!p.product || p.product.deleted) continue;
    formatted.push({
      id: p.id,
      amount: p.unit_amount,
      currency: p.currency,
      interval: p.recurring?.interval,
      interval_count: p.recurring?.interval_count,
      product: {
        id: p.product.id,
        name: p.product.name,
        description: p.product.description,
        metadata: p.product.metadata,
      },
    });
  }
  formatted.sort((a, b) => (a.amount || 0) - (b.amount || 0));
  return Response.json({ prices: formatted, count: formatted.length });
}
// ─── Products (list all) ─────────────────────────────────────────────
// List all active products with their display metadata.
// Responds with { products, count }.
async function handleProducts(env) {
  const { data } = await stripeRequest(env, 'GET', '/products?active=true&limit=100');
  const products = data.map(({ id, name, description, metadata, images, default_price, created }) => ({
    id,
    name,
    description,
    metadata,
    images,
    default_price,
    created,
  }));
  return Response.json({ products, count: products.length });
}
// ─── Webhook ──────────────────────────────────────────────────────────
// Receive a Stripe webhook event: verify the signature (only when a secret
// is configured), relay the raw event to the Slack hub, log a one-line
// summary per event type, then ack with { received: true }.
async function handleWebhook(request, env) {
  const signature = request.headers.get('stripe-signature');
  // Raw body text is required — signature verification covers the exact bytes.
  const body = await request.text();
  // Verification is OPTIONAL: with STRIPE_WEBHOOK_SECRET unset, events are
  // accepted unverified. NOTE(review): consider requiring the secret in prod.
  if (env.STRIPE_WEBHOOK_SECRET) {
    try {
      await verifyStripeSignature(body, signature, env.STRIPE_WEBHOOK_SECRET);
    } catch (err) {
      return new Response(`Webhook signature verification failed: ${err.message}`, { status: 400 });
    }
  }
  let event;
  try {
    event = JSON.parse(body);
  } catch {
    return new Response('Invalid JSON', { status: 400 });
  }
  // Forward to Slack hub
  // Kicked off BEFORE the switch so it overlaps local handling; awaited at
  // the end. Relay failures are deliberately swallowed (best-effort).
  const slackRelay = fetch('https://blackroad-slack.amundsonalexa.workers.dev/stripe', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify(event)
  }).catch(() => {});
  // Per-type logging only — no state is mutated for any event.
  switch (event.type) {
    case 'checkout.session.completed': {
      const session = event.data.object;
      console.log(`✓ Checkout completed: ${session.id} | customer: ${session.customer}`);
      break;
    }
    case 'customer.subscription.created':
    case 'customer.subscription.updated': {
      const sub = event.data.object;
      console.log(`✓ Subscription ${event.type}: ${sub.id} | status: ${sub.status}`);
      break;
    }
    case 'customer.subscription.deleted': {
      const sub = event.data.object;
      console.log(`✓ Subscription cancelled: ${sub.id}`);
      break;
    }
    case 'invoice.payment_failed': {
      const invoice = event.data.object;
      console.log(`✗ Payment failed: ${invoice.id} | customer: ${invoice.customer}`);
      break;
    }
    case 'invoice.payment_succeeded': {
      const invoice = event.data.object;
      console.log(`✓ Payment succeeded: ${invoice.id}`);
      break;
    }
    default:
      console.log(`Unhandled event: ${event.type}`);
  }
  // Ensure the relay settles before the Worker is allowed to terminate.
  await slackRelay;
  return Response.json({ received: true });
}
async function verifyStripeSignature(payload, sigHeader, secret) {
if (!sigHeader) throw new Error('Missing stripe-signature header');
const parts = sigHeader.split(',').reduce((acc, part) => {
const [k, v] = part.split('=');
acc[k] = v;
return acc;
}, {});
const timestamp = parts.t;
const signatures = Object.entries(parts)
.filter(([k]) => k === 'v1')
.map(([, v]) => v);
if (!timestamp || signatures.length === 0) {
throw new Error('Invalid stripe-signature format');
}
const tolerance = 300;
const now = Math.floor(Date.now() / 1000);
if (Math.abs(now - parseInt(timestamp)) > tolerance) {
throw new Error('Timestamp outside tolerance window');
}
const signedPayload = `${timestamp}.${payload}`;
const enc = new TextEncoder();
const key = await crypto.subtle.importKey(
'raw',
enc.encode(secret),
{ name: 'HMAC', hash: 'SHA-256' },
false,
['sign']
);
const sig = await crypto.subtle.sign('HMAC', key, enc.encode(signedPayload));
const computed = Array.from(new Uint8Array(sig))
.map((b) => b.toString(16).padStart(2, '0'))
.join('');
if (!signatures.includes(computed)) {
throw new Error('Signature mismatch');
}
}
// ─── Router ───────────────────────────────────────────────────────────
// Entry point. Dispatches on method + path; every response except /webhook
// is rebuilt afterwards with CORS + security headers stamped on.
export default {
  async fetch(request, env) {
    const url = new URL(request.url);
    // Echo the caller's Origin into CORS headers ('*' for non-browser clients).
    const origin = request.headers.get('Origin') || '*';
    const cors = corsHeaders(origin);
    // CORS preflight: empty 204 with permissive headers.
    if (request.method === 'OPTIONS') {
      return new Response(null, { status: 204, headers: { ...cors, ...SECURITY_HEADERS } });
    }
    // Fail fast when the worker has no Stripe credentials bound.
    if (!env.STRIPE_SECRET_KEY) {
      return Response.json(
        { error: 'Stripe not configured' },
        { status: 503, headers: { ...cors, ...SECURITY_HEADERS } }
      );
    }
    let response;
    try {
      // switch(true) lets each arm combine a method check with a path check.
      switch (true) {
        case url.pathname === '/health':
          response = Response.json({
            status: 'ok',
            worker: 'blackroad-stripe',
            version: '2.0.0',
            time: new Date().toISOString(),
          });
          break;
        case request.method === 'POST' && url.pathname === '/checkout':
          response = await handleCheckout(request, env);
          break;
        case request.method === 'POST' && url.pathname === '/portal':
          response = await handlePortal(request, env);
          break;
        case request.method === 'POST' && url.pathname === '/webhook':
          // Early return: the webhook reply goes straight back to Stripe,
          // bypassing the CORS/security-header rewrite below.
          return await handleWebhook(request, env);
        case request.method === 'GET' && url.pathname === '/prices':
          response = await handlePrices(env);
          break;
        case request.method === 'GET' && url.pathname === '/products':
          response = await handleProducts(env);
          break;
        default:
          response = Response.json(
            { error: 'Not found', routes: ['/health', '/checkout', '/portal', '/webhook', '/prices', '/products'] },
            { status: 404 }
          );
      }
    } catch (err) {
      // Handlers throw on Stripe API errors; surface the message as a 500.
      console.error('Worker error:', err);
      response = Response.json({ error: err.message }, { status: 500 });
    }
    // Rebuild the response so CORS + security headers apply uniformly.
    const headers = new Headers(response.headers);
    for (const [k, v] of Object.entries({ ...cors, ...SECURITY_HEADERS })) {
      headers.set(k, v);
    }
    return new Response(response.body, { status: response.status, headers });
  },
};

View File

@@ -0,0 +1,8 @@
name = "blackroad-stripe"
main = "src/worker.js"
compatibility_date = "2024-12-01"
workers_dev = true
routes = [
{ pattern = "stripe.blackroad.io/*", zone_name = "blackroad.io" }
]

View File

@@ -0,0 +1,188 @@
// BlackRoad Stats API — Central live data hub for all BlackRoad websites
// KV-backed, pushed from Mac cron, consumed by all frontends
//
// GET /fleet — node status, specs, services
// GET /infra — infrastructure counts (tunnels, DBs, ports, etc.)
// GET /github — live GitHub data (proxied + cached)
// GET /analytics — proxied from analytics worker
// GET /all — combined payload for single-fetch
// POST /push — push data from collector (requires STATS_KEY)
// GET /health — uptime check
const CORS = {
'Access-Control-Allow-Origin': '*',
'Access-Control-Allow-Methods': 'GET,POST,OPTIONS',
'Access-Control-Allow-Headers': 'Content-Type,Authorization',
'Cache-Control': 'public, max-age=30',
};
function json(data, status = 200) {
return new Response(JSON.stringify(data), {
status,
headers: { ...CORS, 'Content-Type': 'application/json' },
});
}
// GitHub account whose repositories are aggregated by /github.
const GITHUB_USER = 'blackboxprogramming';
// Upstream analytics worker proxied by /analytics.
const ANALYTICS_URL = 'https://analytics-blackroad.amundsonalexa.workers.dev';
// Entry point. All state lives in the STATS KV namespace: collectors POST
// to /push, frontends read the GET endpoints; /github and /analytics add
// short-TTL KV caches in front of upstream APIs.
export default {
  async fetch(request, env) {
    const url = new URL(request.url);
    const path = url.pathname;
    if (request.method === 'OPTIONS') {
      return new Response(null, { headers: CORS });
    }
    try {
      // ── Push data from collector ──
      // Auth: Bearer token or ?key= query param must equal STATS_KEY.
      if (path === '/push' && request.method === 'POST') {
        const authKey = request.headers.get('Authorization')?.replace('Bearer ', '') ||
          url.searchParams.get('key');
        if (authKey !== env.STATS_KEY) return json({ error: 'unauthorized' }, 401);
        const body = await request.json();
        const { category, data } = body;
        if (!category || !data) return json({ error: 'category and data required' }, 400);
        // Store with timestamp
        const payload = { data, updated_at: new Date().toISOString() };
        await env.STATS.put(`stats:${category}`, JSON.stringify(payload));
        return json({ ok: true, category });
      }
      // ── Fleet status ── (written by /push with category "fleet")
      if (path === '/fleet') {
        const raw = await env.STATS.get('stats:fleet');
        if (!raw) return json({ error: 'no fleet data yet', hint: 'run collector' }, 404);
        return json(JSON.parse(raw));
      }
      // ── Infrastructure counts ──
      if (path === '/infra') {
        const raw = await env.STATS.get('stats:infra');
        if (!raw) return json({ error: 'no infra data yet' }, 404);
        return json(JSON.parse(raw));
      }
      // ── GitHub data (proxied + cached 5min) ──
      if (path === '/github') {
        const cached = await env.STATS.get('cache:github');
        if (cached) return json(JSON.parse(cached));
        // Fetch repos (2 pages to get all)
        const [p1, p2] = await Promise.all([
          fetch(`https://api.github.com/users/${GITHUB_USER}/repos?per_page=100&page=1&sort=updated`, {
            headers: { 'User-Agent': 'BlackRoad-Stats/1.0', ...(env.GITHUB_TOKEN ? { 'Authorization': `token ${env.GITHUB_TOKEN}` } : {}) }
          }),
          fetch(`https://api.github.com/users/${GITHUB_USER}/repos?per_page=100&page=2&sort=updated`, {
            headers: { 'User-Agent': 'BlackRoad-Stats/1.0', ...(env.GITHUB_TOKEN ? { 'Authorization': `token ${env.GITHUB_TOKEN}` } : {}) }
          }),
        ]);
        const repos1 = await p1.json();
        const repos2 = await p2.json();
        // Array.isArray guards: on rate-limit GitHub returns an error OBJECT.
        const allRepos = [...(Array.isArray(repos1) ? repos1 : []), ...(Array.isArray(repos2) ? repos2 : [])];
        const nonFork = allRepos.filter(r => !r.fork);
        const result = {
          total_repos: allRepos.length,
          non_fork_repos: nonFork.length,
          forks: allRepos.length - nonFork.length,
          total_stars: nonFork.reduce((s, r) => s + (r.stargazers_count || 0), 0),
          total_size_kb: nonFork.reduce((s, r) => s + (r.size || 0), 0),
          languages: [...new Set(nonFork.map(r => r.language).filter(Boolean))],
          most_recent: nonFork.slice(0, 5).map(r => ({
            name: r.name,
            updated: r.updated_at,
            language: r.language,
            stars: r.stargazers_count,
          })),
          fetched_at: new Date().toISOString(),
        };
        await env.STATS.put('cache:github', JSON.stringify(result), { expirationTtl: 300 });
        return json(result);
      }
      // ── Analytics proxy ── (cached 60s per range)
      if (path === '/analytics') {
        const range = url.searchParams.get('range') || '24h';
        const cached = await env.STATS.get(`cache:analytics:${range}`);
        if (cached) return json(JSON.parse(cached));
        const res = await fetch(`${ANALYTICS_URL}/stats?range=${range}`);
        if (!res.ok) return json({ error: 'analytics unavailable' }, 502);
        const data = await res.json();
        await env.STATS.put(`cache:analytics:${range}`, JSON.stringify(data), { expirationTtl: 60 });
        return json(data);
      }
      // ── Combined payload ── (single fetch for frontends; falls back to
      // the cold-path helpers below when caches are empty)
      if (path === '/all') {
        const [fleet, infra, github, analytics] = await Promise.all([
          env.STATS.get('stats:fleet'),
          env.STATS.get('stats:infra'),
          env.STATS.get('cache:github').then(c => c || fetchGitHub(env)),
          env.STATS.get('cache:analytics:24h').then(c => c || env.STATS.get('stats:analytics')).then(c => c || fetchAnalytics(env)),
        ]);
        // Values may be KV strings or helper-returned strings; parse either.
        return json({
          fleet: fleet ? JSON.parse(fleet) : null,
          infra: infra ? JSON.parse(infra) : null,
          github: github ? (typeof github === 'string' ? JSON.parse(github) : github) : null,
          analytics: analytics ? (typeof analytics === 'string' ? JSON.parse(analytics) : analytics) : null,
        });
      }
      // ── Health ── (reports age of the last fleet push)
      if (path === '/health') {
        const fleet = await env.STATS.get('stats:fleet');
        const fleetAge = fleet ? JSON.parse(fleet).updated_at : null;
        return json({
          status: 'up',
          fleet_data: fleetAge ? `last updated ${fleetAge}` : 'no data yet',
        });
      }
      return json({ error: 'not found', endpoints: ['/fleet', '/infra', '/github', '/analytics', '/all', '/push', '/health'] }, 404);
    } catch (err) {
      return json({ error: err.message }, 500);
    }
  },
};
// Cold-path GitHub fetch used by /all when the 5-minute cache is empty.
// Returns the cached JSON *string* (to match raw KV reads) or null on failure.
async function fetchGitHub(env) {
  try {
    const ghHeaders = {
      'User-Agent': 'BlackRoad-Stats/1.0',
      ...(env.GITHUB_TOKEN ? { 'Authorization': `token ${env.GITHUB_TOKEN}` } : {}),
    };
    const pageUrl = (page) =>
      `https://api.github.com/users/${GITHUB_USER}/repos?per_page=100&page=${page}&sort=updated`;
    const [p1, p2] = await Promise.all([
      fetch(pageUrl(1), { headers: ghHeaders }),
      fetch(pageUrl(2), { headers: ghHeaders }),
    ]);
    const repos = [...await p1.json(), ...await p2.json()];
    const nonFork = repos.filter(r => !r.fork);
    const result = {
      total_repos: repos.length,
      non_fork_repos: nonFork.length,
      forks: repos.length - nonFork.length,
      total_stars: nonFork.reduce((s, r) => s + (r.stargazers_count || 0), 0),
      languages: [...new Set(nonFork.map(r => r.language).filter(Boolean))],
      fetched_at: new Date().toISOString(),
    };
    await env.STATS.put('cache:github', JSON.stringify(result), { expirationTtl: 300 });
    return JSON.stringify(result);
  } catch { return null; }
}
// Cold-path analytics fetch for /all; caches the payload for 60 seconds.
// Returns the JSON string or null if the analytics worker is unreachable.
async function fetchAnalytics(env) {
  try {
    const res = await fetch(`${ANALYTICS_URL}/stats?range=24h`);
    const payload = await res.json();
    const text = JSON.stringify(payload);
    await env.STATS.put('cache:analytics:24h', text, { expirationTtl: 60 });
    return text;
  } catch { return null; }
}

View File

@@ -0,0 +1,8 @@
name = "stats-blackroad"
main = "src/worker.js"
compatibility_date = "2024-12-01"
workers_dev = true
[[kv_namespaces]]
binding = "STATS"
id = "9555ec8a18aa4ff0a7ca9aa2b09cf877"

File diff suppressed because it is too large Load Diff

21
workers/tollbooth.toml Normal file
View File

@@ -0,0 +1,21 @@
name = "roadpay"
main = "src/worker.js"
compatibility_date = "2024-12-01"
workers_dev = true
routes = [
{ pattern = "pay.blackroad.io/*", zone_name = "blackroad.io" }
]
[[d1_databases]]
binding = "DB"
database_name = "tollbooth"
database_id = "29a255f5-a449-485c-90e6-38b7e4bd934d"
[[kv_namespaces]]
binding = "CACHE"
id = "68e5424a74234de197d5694127d2c3e5"
[vars]
OLLAMA_URL = "https://ollama.blackroad.io"
AUTH_API = "https://auth.blackroad.io"