Add 10 deployment and infrastructure scripts
- Pi cluster deployment for fleet management
- Cloudflare worker and DNS deployment
- Full service deployment orchestration
- Tunnel and VPN setup (Cloudflare, Tailscale)
- K3s and Docker setup for container orchestration
- Pi fleet management for hardware nodes

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
90
scripts/cloudflare-dns.sh
Normal file
90
scripts/cloudflare-dns.sh
Normal file
@@ -0,0 +1,90 @@
|
|||||||
|
#!/bin/bash
# BlackRoad Cloudflare DNS Configuration
# Points all 19 domains to Pi cluster and DO droplets
#
# Informational script: verifies wrangler is present, shows the DNS
# strategy, and prints (does not run) suggested per-domain commands.

set -e

# Target IPs
ALICE_IP="192.168.4.49"
LUCIDIA_IP="192.168.4.38"
CODEX_IP="159.65.43.12"
SHELLFISH_IP="174.138.44.45"

# Cloudflare Account ID
CF_ACCOUNT_ID="848cf0b18d51e0170e0d1537aec3505a"

# Domains to configure
DOMAINS=(
  "aliceqi.com"
  "blackboxprogramming.io"
  "blackroadai.com"
  "blackroad.company"
  "blackroadinc.us"
  "blackroad.io"
  "blackroad.me"
  "blackroad.network"
  "blackroadqi.com"
  "blackroadquantum.com"
  "blackroadquantum.info"
  "blackroadquantum.net"
  "blackroadquantum.shop"
  "blackroadquantum.store"
  "blackroad.systems"
  "lucidiaqi.com"
  "lucidia.studio"
  "roadchain.io"
  "roadcoin.io"
)

# Opening banner.
cat <<BANNER
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
 🖤 BLACKROAD CLOUDFLARE DNS SETUP
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━

BANNER

# Strategy overview (variables expand in the heredoc).
cat <<STRATEGY
📋 Configuration Strategy:

 Primary (Public): $CODEX_IP (codex-infinity)
 Secondary: $SHELLFISH_IP (shellfish)
 Pi Cluster: $ALICE_IP, $LUCIDIA_IP (local network)

We'll use Cloudflare Tunnel OR direct IP configuration

STRATEGY

# Check if we have CF API access
if ! command -v wrangler &> /dev/null; then
  echo "❌ Wrangler not found. Install with: npm install -g wrangler"
  exit 1
fi

echo "🔍 Checking Cloudflare authentication..."
# head's exit status governs the pipeline, so a failing whoami does not
# trip `set -e` here.
wrangler whoami 2>&1 | head -5

cat <<OPTIONS

━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
DNS Update Options:
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━

Option 1: Point to Public DigitalOcean Droplets
 • Immediate access
 • Use $CODEX_IP or $SHELLFISH_IP

Option 2: Use Cloudflare Tunnel (Recommended for Pi)
 • Secure zero-trust access
 • No port forwarding needed
 • Encrypts traffic

Option 3: Manual DNS via Cloudflare Dashboard
 • Visit: dash.cloudflare.com
 • Add A records pointing to IPs

OPTIONS

# Generate wrangler commands for DNS updates (printed, not executed).
echo "🔧 Generated Commands:"
echo ""
echo "# To point domains to codex-infinity (public):"
for d in "${DOMAINS[@]}"; do
  printf 'wrangler dns create %s A @ --content %s --proxied false\n' "$d" "$CODEX_IP"
done

printf '\n%s\n' "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
120
scripts/deploy-all.sh
Normal file
120
scripts/deploy-all.sh
Normal file
@@ -0,0 +1,120 @@
|
|||||||
|
#!/bin/bash
#
# BlackRoad Services - Mass Deployment Script
# Deploys all services to Vercel production
#
# Each directory under services/<name> that has a package.json is
# deployed via `vercel --prod`; results are tallied and the script exits
# non-zero if any deployment failed.

set -e

# Colors for output
GREEN='\033[0;32m'
BLUE='\033[0;34m'
RED='\033[0;31m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color

# Service directories
SERVICES=(
  "api"
  "brand"
  "core"
  "demo"
  "docs"
  "ideas"
  "infra"
  "operator"
  "prism"
  "research"
  "web"
)

echo -e "${BLUE}🚀 BlackRoad Mass Deployment Script${NC}"
echo -e "${BLUE}====================================${NC}\n"

# Track results ("service:url" pairs for SUCCESSFUL)
SUCCESSFUL=()
FAILED=()
SKIPPED=()

# Deploy one service to Vercel.
# Arguments: $1 - service name (directory under services/)
# Returns:   0 on success, 1 if skipped or failed
deploy_service() {
  local service=$1
  local service_path="services/$service"
  local log="/tmp/deploy-$service.log"

  echo -e "${BLUE}📦 Deploying: ${service}${NC}"

  # Check if service exists
  if [ ! -d "$service_path" ]; then
    echo -e "${YELLOW}⚠️ Service directory not found: $service_path${NC}"
    SKIPPED+=("$service")
    return 1
  fi

  # Check if package.json exists
  if [ ! -f "$service_path/package.json" ]; then
    echo -e "${YELLOW}⚠️ No package.json found in $service_path${NC}"
    SKIPPED+=("$service")
    return 1
  fi

  # Deploy to Vercel
  cd "$service_path"

  # BUG FIX: the original piped vercel through `tee | tail -1`, so the
  # `if` tested tail's exit status (always 0) and every failure was
  # recorded as a success. Capture vercel's own status instead.
  local status=0
  vercel --prod --yes >"$log" 2>&1 || status=$?
  tail -1 "$log" # keep the original one-line progress output

  if [ "$status" -eq 0 ]; then
    # Extract URL from log (empty if vercel printed none).
    local url
    url=$(grep -oE 'https://[a-z0-9-]+\.vercel\.app' "$log" | tail -1) || true
    echo -e "${GREEN}✅ Successfully deployed: $service${NC}"
    echo -e "${GREEN} URL: $url${NC}\n"
    SUCCESSFUL+=("$service:$url")
  else
    echo -e "${RED}❌ Failed to deploy: $service${NC}\n"
    FAILED+=("$service")
  fi

  cd - > /dev/null
}

# Main deployment loop
echo -e "${BLUE}Starting deployment of ${#SERVICES[@]} services...${NC}\n"

for service in "${SERVICES[@]}"; do
  # BUG FIX: under `set -e`, a skipped/failed service (return 1) aborted
  # the whole run at the first miss; tolerate per-service failure.
  deploy_service "$service" || true
  sleep 2 # Brief pause between deployments
done

# Summary
echo -e "\n${BLUE}================================${NC}"
echo -e "${BLUE}📊 Deployment Summary${NC}"
echo -e "${BLUE}================================${NC}\n"

echo -e "${GREEN}✅ Successful (${#SUCCESSFUL[@]}):${NC}"
for item in "${SUCCESSFUL[@]}"; do
  service="${item%%:*}"
  # BUG FIX: `${item##*:}` stripped through the colon in "https://",
  # mangling the URL to "//host"; strip only the "service:" prefix.
  url="${item#*:}"
  echo -e " - $service → $url"
done

if [ ${#FAILED[@]} -gt 0 ]; then
  echo -e "\n${RED}❌ Failed (${#FAILED[@]}):${NC}"
  for service in "${FAILED[@]}"; do
    echo -e " - $service"
  done
fi

if [ ${#SKIPPED[@]} -gt 0 ]; then
  echo -e "\n${YELLOW}⚠️ Skipped (${#SKIPPED[@]}):${NC}"
  for service in "${SKIPPED[@]}"; do
    echo -e " - $service"
  done
fi

echo -e "\n${BLUE}================================${NC}"
echo -e "${GREEN}🎉 Deployment Complete!${NC}"
echo -e "${BLUE}================================${NC}\n"

# Exit with error if any failed
if [ ${#FAILED[@]} -gt 0 ]; then
  exit 1
fi

exit 0
42
scripts/deploy-cloudflare.sh
Normal file
42
scripts/deploy-cloudflare.sh
Normal file
@@ -0,0 +1,42 @@
|
|||||||
|
#!/usr/bin/env bash
# Deploy Cloudflare Worker for BlackRoad
#
# Installs wrangler if missing, ensures the user is logged in, then
# deploys ~/blackroad-deploy-worker.js as "blackroad-deploy-dispatcher".
# Exits non-zero on any failure so callers can detect it.

echo "🚀 Deploying BlackRoad Cloudflare Worker"
echo ""

# Check if wrangler is installed
if ! command -v wrangler &>/dev/null; then
  echo "📦 Installing wrangler..."
  # BUG FIX: abort if the install fails — every later step needs wrangler.
  npm install -g wrangler || { echo "❌ Failed to install wrangler"; exit 1; }
fi

# Check auth
echo "🔐 Checking Cloudflare authentication..."
if ! wrangler whoami &>/dev/null; then
  echo "⚠️ Not logged in. Running: wrangler login"
  wrangler login
fi

# Deploy worker (the worker script lives in the home directory)
echo "📤 Deploying worker..."
cd ~ || exit 1 # BUG FIX: don't deploy from the wrong directory if cd fails

if wrangler deploy blackroad-deploy-worker.js --name blackroad-deploy-dispatcher; then
  echo ""
  echo "✅ Worker deployed successfully!"
  echo ""
  echo "🌐 Your worker URL will be shown above"
  echo ""
  echo "Next steps:"
  echo " 1. Copy the worker URL"
  echo " 2. Go to GitHub repo settings → Webhooks"
  echo " 3. Add webhook with URL: https://blackroad-deploy-dispatcher.YOUR_SUBDOMAIN.workers.dev/webhook/github"
  echo " 4. Set Content type: application/json"
  echo " 5. Select: Just the push event"
else
  echo "❌ Deployment failed"
  echo ""
  echo "Manual deployment:"
  echo " 1. wrangler login"
  echo " 2. wrangler deploy ~/blackroad-deploy-worker.js --name blackroad-deploy-dispatcher"
  # BUG FIX: previously fell through and exited 0 even when the deploy failed.
  exit 1
fi
468
scripts/deploy-pi-cluster.sh
Normal file
468
scripts/deploy-pi-cluster.sh
Normal file
@@ -0,0 +1,468 @@
|
|||||||
|
#!/bin/bash
# 🥧 Deploy BlackRoad Products to Raspberry Pi Cluster
# Deploys backend services across 3 Raspberry Pis
#
# This section probes connectivity; later sections build per-service
# deployment packages under $HOME (even when every Pi is offline).

printf '%s\n' "🥧 BlackRoad Pi Cluster Deployment" "========================================="
echo ""

# Pi cluster configuration (name:ip pairs)
PI_CLUSTER=(
  "lucidia:192.168.4.38"
  "blackroad-pi:192.168.4.64"
  "lucidia-alt:192.168.4.99"
)

# Products with backend services suitable for Pi deployment
# (format: name:port:description)
BACKEND_PRODUCTS=(
  "blackroad-ai-platform:3000:AI Platform API"
  "blackroad-vllm:8000:vLLM Inference Server"
  "blackroad-localai:8080:LocalAI Server"
  "roadapi:3001:Core API Gateway"
  "roadlog-monitoring:9090:Monitoring Dashboard"
  "blackroad-minio:9000:Object Storage"
  "roadauth:3002:Authentication Service"
  "roadbilling:3003:Billing Service"
)

# Check Pi connectivity
echo "📡 Checking Pi Cluster Connectivity..."
echo ""

available_pis=()
for entry in "${PI_CLUSTER[@]}"; do
  node_name=${entry%%:*}
  node_ip=${entry##*:}
  printf '%s' " Testing $node_name ($node_ip)... "

  # NOTE(review): ping -W is seconds on Linux but milliseconds on macOS —
  # confirm the OS this runs from.
  if ! ping -c 1 -W 2 "$node_ip" &>/dev/null; then
    echo "❌ Offline"
  else
    echo "✅ Online"
    available_pis+=("$entry")
  fi
done

echo ""
printf '%s\n' "=========================================" "📊 Cluster Status" "========================================="
echo "Total Pis: ${#PI_CLUSTER[@]}"
echo "Available: ${#available_pis[@]}"
echo "Offline: $(( ${#PI_CLUSTER[@]} - ${#available_pis[@]} ))"
echo ""

# Continue even if Pis are offline - create deployment packages anyway
if [ ${#available_pis[@]} -eq 0 ]; then
  echo "⚠️ No Pis currently online, but will create deployment packages..."
  echo ""
  # Fall back to the full roster so round-robin targeting still works.
  available_pis=("${PI_CLUSTER[@]}")
fi
||||||
|
|
||||||
|
# Deploy services to available Pis
# For each BACKEND_PRODUCT, build a self-contained package directory
# ($HOME/<product>-pi-deploy) holding a docker-compose file, a stub Node
# server, a deploy script, and a README, assigning Pis round-robin.
echo "========================================="
echo "🚀 Creating Backend Service Packages"
echo "========================================="
echo ""

deployed_count=0
failed_count=0 # NOTE(review): never incremented or read below — confirm intent
pi_index=0

for product_info in "${BACKEND_PRODUCTS[@]}"; do
  IFS=':' read -r product port description <<< "$product_info"

  # Round-robin distribution across Pis
  pi_info="${available_pis[$pi_index]}"
  IFS=':' read -r pi_name pi_ip <<< "$pi_info"

  echo "📦 Preparing: $product"
  echo " Description: $description"
  echo " Target: $pi_name ($pi_ip)"
  echo " Port: $port"

  # Create deployment package (wipe any previous build first)
  deployment_dir="$HOME/${product}-pi-deploy"
  rm -rf "$deployment_dir"
  mkdir -p "$deployment_dir/app"

  # Create Docker Compose configuration ($vars expand at generation time)
  cat > "$deployment_dir/docker-compose.yml" <<EOF
version: '3.8'

services:
  $product:
    image: node:18-alpine
    container_name: $product
    restart: unless-stopped
    ports:
      - "$port:$port"
    volumes:
      - ./app:/app
    working_dir: /app
    command: node server.js
    environment:
      - NODE_ENV=production
      - PORT=$port
      - PI_NAME=$pi_name
      - SERVICE_NAME=$product
    networks:
      - blackroad-net

networks:
  blackroad-net:
    driver: bridge
EOF

  # Create Node.js server (stub that answers /health and a status JSON;
  # backslash-escaped backticks/dollars survive into the generated JS)
  cat > "$deployment_dir/app/server.js" <<EOF
const http = require('http');
const PORT = process.env.PORT || $port;
const PI_NAME = process.env.PI_NAME || 'unknown';
const SERVICE_NAME = process.env.SERVICE_NAME || '$product';

const server = http.createServer((req, res) => {
  if (req.url === '/health') {
    res.writeHead(200, {'Content-Type': 'application/json'});
    res.end(JSON.stringify({status: 'healthy'}));
  } else {
    res.writeHead(200, {'Content-Type': 'application/json'});
    res.end(JSON.stringify({
      service: SERVICE_NAME,
      description: '$description',
      pi: PI_NAME,
      status: 'running',
      port: PORT,
      uptime: process.uptime(),
      timestamp: new Date().toISOString()
    }));
  }
});

server.listen(PORT, '0.0.0.0', () => {
  console.log(\`🥧 \${SERVICE_NAME} running on \${PI_NAME}:\${PORT}\`);
  console.log(\`🔗 Access: http://\${PI_NAME}:\${PORT}\`);
});
EOF

  # Create package.json
  cat > "$deployment_dir/app/package.json" <<EOF
{
  "name": "$product",
  "version": "1.0.0",
  "description": "$description",
  "main": "server.js",
  "scripts": {
    "start": "node server.js"
  },
  "dependencies": {}
}
EOF

  # Create deployment script.
  # BUG FIX: the delimiter was quoted ('DEPLOY_SCRIPT'), so $product and
  # $port in the health-check hint were written literally and expanded to
  # empty strings on the Pi. Unquoted now — these are the only $ uses in
  # the body, so everything else is unaffected.
  cat > "$deployment_dir/deploy.sh" <<DEPLOY_SCRIPT
#!/bin/bash
echo "🥧 Deploying to Pi..."

# Create network if doesn't exist
docker network create blackroad-net 2>/dev/null || true

# Stop existing container
docker-compose down 2>/dev/null || true

# Pull latest Node.js image
docker-compose pull

# Start service
docker-compose up -d

# Wait for service to start
sleep 3

# Check status
docker-compose ps

echo ""
echo "✅ Deployment complete!"
echo "🔍 Check logs: docker-compose logs -f"
echo "🩺 Health check: docker-compose exec $product curl http://localhost:$port/health"
DEPLOY_SCRIPT

  chmod +x "$deployment_dir/deploy.sh"

  # Create README (unquoted heredoc: $vars expand now, \` stays a literal
  # backtick in the generated markdown)
  cat > "$deployment_dir/README.md" <<EOF
# $product - Pi Deployment

**Description:** $description
**Target Pi:** $pi_name ($pi_ip)
**Port:** $port

## Deployment Instructions

1. Copy files to Pi:
\`\`\`bash
scp -r $deployment_dir pi@$pi_ip:~/
\`\`\`

2. SSH into Pi:
\`\`\`bash
ssh pi@$pi_ip
\`\`\`

3. Deploy:
\`\`\`bash
cd $(basename $deployment_dir)
./deploy.sh
\`\`\`

4. Verify:
\`\`\`bash
curl http://localhost:$port
curl http://localhost:$port/health
\`\`\`

5. Monitor:
\`\`\`bash
docker logs -f $product
\`\`\`

## Management

- Stop: \`docker-compose down\`
- Restart: \`docker-compose restart\`
- Logs: \`docker-compose logs -f\`
- Status: \`docker-compose ps\`
EOF

  # Idiom fix: ((var++)) returns status 1 when the old value is 0; use a
  # plain arithmetic assignment instead.
  deployed_count=$((deployed_count + 1))
  echo " ✅ Package created: $deployment_dir"
  echo ""

  # Move to next Pi (round-robin)
  pi_index=$(( (pi_index + 1) % ${#available_pis[@]} ))
done
|
|
||||||
|
# Deploy vLLM for edge AI inference
# Builds a single package (no round-robin): it targets only the first
# entry of available_pis.
echo "========================================="
echo "🤖 Edge AI Inference Package"
echo "========================================="
echo ""

vllm_dir="$HOME/vllm-pi-edge"
rm -rf "$vllm_dir"
mkdir -p "$vllm_dir"

# Target the first available Pi.
primary_pi="${available_pis[0]}"
IFS=':' read -r pi_name pi_ip <<< "$primary_pi"

echo "🎯 Creating vLLM package for $pi_name ($pi_ip)"

# Compose file written verbatim (no $ expansions needed in the body).
# NOTE(review): vllm/vllm-openai images are typically published for
# x86_64 + CUDA — confirm an ARM64 build exists before deploying to a
# Raspberry Pi.
cat > "$vllm_dir/docker-compose.yml" <<EOF
version: '3.8'

services:
  vllm:
    image: vllm/vllm-openai:latest
    container_name: vllm-edge
    restart: unless-stopped
    ports:
      - "8000:8000"
    volumes:
      - ./models:/models
      - ./cache:/root/.cache
    environment:
      - MODEL_NAME=TinyLlama/TinyLlama-1.1B-Chat-v1.0
      - MAX_MODEL_LEN=2048
      - TENSOR_PARALLEL_SIZE=1
    command: --model TinyLlama/TinyLlama-1.1B-Chat-v1.0 --host 0.0.0.0 --port 8000 --max-model-len 2048
    networks:
      - blackroad-net

networks:
  blackroad-net:
    driver: bridge
EOF

# README with copy-paste deploy/test commands ($pi_ip/$vllm_dir expand
# now; \` survives as a literal backtick in the generated markdown).
cat > "$vllm_dir/README.md" <<EOF
# vLLM Edge AI Inference

**Target:** $pi_name ($pi_ip)
**Model:** TinyLlama 1.1B (optimized for Raspberry Pi)
**Port:** 8000

## Deploy:
\`\`\`bash
scp -r $vllm_dir pi@$pi_ip:~/
ssh pi@$pi_ip 'cd vllm-pi-edge && docker-compose up -d'
\`\`\`

## Test:
\`\`\`bash
curl http://$pi_ip:8000/v1/models
\`\`\`
EOF

echo " ✅ vLLM package created: $vllm_dir"
echo ""
|
||||||
|
# MinIO distributed storage
# Builds one package meant to be copied to every Pi (see generated README).
echo "========================================="
echo "📦 Distributed Storage Package"
echo "========================================="
echo ""

minio_dir="$HOME/minio-distributed"
rm -rf "$minio_dir"
mkdir -p "$minio_dir"

echo "🎯 Creating MinIO distributed storage configuration"

# SECURITY NOTE(review): root credentials are embedded in plaintext here
# and repeated in the README below — consider injecting them via an env
# file on each Pi instead of committing them.
cat > "$minio_dir/docker-compose.yml" <<EOF
version: '3.8'

services:
  minio:
    image: minio/minio:latest
    container_name: minio-distributed
    restart: unless-stopped
    ports:
      - "9000:9000"
      - "9001:9001"
    volumes:
      - ./data:/data
    environment:
      - MINIO_ROOT_USER=blackroad
      - MINIO_ROOT_PASSWORD=blackroad-secure-2026
    command: server /data --console-address ":9001"
    networks:
      - blackroad-net

networks:
  blackroad-net:
    driver: bridge
EOF

# README: ${PI_CLUSTER[@]} and $minio_dir expand at generation time;
# \$pi / \$name / \$ip stay literal so the snippet's own loop works when
# a user pastes it.
cat > "$minio_dir/README.md" <<EOF
# MinIO Distributed Storage

Deploy to each Pi for distributed object storage.

## Deploy to all Pis:
\`\`\`bash
for pi in ${PI_CLUSTER[@]}; do
  IFS=':' read -r name ip <<< "\$pi"
  echo "Deploying to \$name (\$ip)..."
  scp -r $minio_dir pi@\$ip:~/
  ssh pi@\$ip 'cd minio-distributed && docker-compose up -d'
done
\`\`\`

## Access:
- Console: http://[PI_IP]:9001
- API: http://[PI_IP]:9000
- User: blackroad
- Pass: blackroad-secure-2026
EOF

echo " ✅ MinIO package created: $minio_dir"
echo ""
|
||||||
|
# Create master deployment script
# Writes ~/deploy-all-to-pis.sh, which scp/ssh-deploys every
# $HOME/*-pi-deploy package to the Pi IP recorded in that package's
# README. The heredoc delimiter is quoted, so the body below is written
# literally and only expands when the generated script runs.
# NOTE(review): PI_CLUSTER inside the generated script is never read —
# targets come from each package README — confirm it is meant purely as
# documentation.
cat > "$HOME/deploy-all-to-pis.sh" <<'MASTER_SCRIPT'
#!/bin/bash
# 🥧 Master Pi Deployment Script

PI_CLUSTER=(
  "lucidia:192.168.4.38"
  "blackroad-pi:192.168.4.64"
  "lucidia-alt:192.168.4.99"
)

echo "🥧 BlackRoad Pi Cluster - Master Deployment"
echo "==========================================="
echo ""
echo "This will deploy all services to the Pi cluster."
echo ""
read -p "Continue? (y/n) " -n 1 -r
echo
if [[ ! $REPLY =~ ^[Yy]$ ]]; then
  echo "Deployment cancelled."
  exit 1
fi

# Deploy backend services
for deploy_dir in $HOME/*-pi-deploy; do
  if [ -d "$deploy_dir" ]; then
    service_name=$(basename "$deploy_dir" | sed 's/-pi-deploy//')
    echo ""
    echo "📦 Deploying $service_name..."

    # Find target Pi from README
    target_ip=$(grep "Target Pi:" "$deploy_dir/README.md" | grep -oE '[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+' | head -1)

    if [ -n "$target_ip" ]; then
      echo " Target: $target_ip"
      echo " Copying files..."

      scp -r "$deploy_dir" "pi@$target_ip:~/" 2>/dev/null && \
      ssh "pi@$target_ip" "cd $(basename $deploy_dir) && ./deploy.sh" 2>/dev/null && \
      echo " ✅ Deployed successfully" || \
      echo " ⚠️ Deployment failed (Pi may be offline or SSH not configured)"
    fi
  fi
done

echo ""
echo "==========================================="
echo "✅ Master deployment complete!"
echo "🖤🛣️"
MASTER_SCRIPT

chmod +x "$HOME/deploy-all-to-pis.sh"

echo " ✅ Master script created: ~/deploy-all-to-pis.sh"
echo ""
||||||
|
# Summary — final report of everything the script generated, plus
# copy-paste next steps for the operator.
cat <<SUMMARY
=========================================
📊 Deployment Package Summary
=========================================

✅ Backend Services: $deployed_count packages created
✅ Edge AI: vLLM package ready
✅ Storage: MinIO package ready
✅ Master Script: ~/deploy-all-to-pis.sh

📦 Package Locations:
SUMMARY
for pkg_dir in "$HOME"/*-pi-deploy; do
  [ -d "$pkg_dir" ] && echo " - $(basename "$pkg_dir")"
done
# Quoted delimiter: $pi below must reach the terminal literally.
cat <<'TAIL'
 - vllm-pi-edge
 - minio-distributed

🚀 Quick Deploy Options:

1. Deploy everything (when Pis are online):
 ~/deploy-all-to-pis.sh

2. Deploy individual service:
 cd ~/<service>-pi-deploy && cat README.md

3. Test Pi connectivity:
 for pi in 192.168.4.38 192.168.4.64 192.168.4.99; do ping -c 1 $pi; done

📝 Next Steps:
 1. Ensure Pis are powered on and connected to network
 2. Configure SSH keys: ssh-copy-id pi@[PI_IP]
 3. Install Docker on each Pi: ssh pi@[PI_IP] 'curl -fsSL https://get.docker.com | sh'
 4. Run master deployment: ~/deploy-all-to-pis.sh

🖤🛣️ Pi Cluster Deployment Packages Ready!
TAIL
|
||||||
31
scripts/docker-setup.sh
Normal file
31
scripts/docker-setup.sh
Normal file
@@ -0,0 +1,31 @@
|
|||||||
|
#!/bin/bash
# Interactive Docker management menu: draw one screen, run the chosen
# action, then re-exec for the next round.
clear
cat <<'MENU'

 🐳🐳🐳 DOCKER 🐳🐳🐳

 📦 1 Running Containers
 🖼️ 2 Images
 🌐 3 Networks
 💽 4 Volumes
 🔄 5 Restart Container
 🗑️ 6 Prune All
 📊 7 Stats (live)
 📋 8 Compose Up
 🔙 0 ← Main Menu

MENU
read -p " ⌨️ > " c
case $c in
  1) docker ps --format "table {{.Names}}\t{{.Status}}\t{{.Ports}}" 2>/dev/null || echo " ⚠️ Docker not running"; read -p " ↩ ";;
  2) docker images --format "table {{.Repository}}\t{{.Tag}}\t{{.Size}}" 2>/dev/null; read -p " ↩ ";;
  3) docker network ls 2>/dev/null; read -p " ↩ ";;
  4) docker volume ls 2>/dev/null; read -p " ↩ ";;
  5) read -p " 🔄 Container: " cn; docker restart "$cn" && echo " ✅ Restarted" || echo " ❌ Failed"; read -p " ↩ ";;
  6) echo " 🗑️ Pruning..."; docker system prune -f 2>/dev/null; read -p " ↩ ";;
  7) docker stats --no-stream 2>/dev/null; read -p " ↩ ";;
  # BUG FIX: run cd in a subshell so visiting a compose dir cannot change
  # this script's cwd — the trailing `exec ./docker.sh` depends on it.
  8) read -p " 📋 Compose dir: " d; (cd "$d" && docker compose up -d 2>/dev/null) || echo " ❌ Failed"; read -p " ↩ ";;
  0) exec ./menu.sh;;
  *) echo " ❌"; sleep 1;;
esac
# NOTE(review): this file is committed as scripts/docker-setup.sh but
# re-execs ./docker.sh — confirm the intended on-disk filename.
exec ./docker.sh
29
scripts/k3s-setup.sh
Normal file
29
scripts/k3s-setup.sh
Normal file
@@ -0,0 +1,29 @@
|
|||||||
|
#!/bin/bash
# Interactive k3s cluster menu: draw one screen, run the chosen kubectl
# query, then re-exec for the next round.
clear
cat <<'MENU'

 ☸️☸️☸️ K3S CLUSTER ☸️☸️☸️

 📊 1 Cluster Info
 🖥️ 2 Nodes
 📦 3 Pods (all ns)
 🌐 4 Services
 🔀 5 Ingress / Traefik
 📋 6 Deployments
 💾 7 PVCs
 🔙 0 ← Main Menu

MENU
read -p " ⌨️ > " c

# kubectl wrapper: every menu entry silenced stderr the same way.
k() { kubectl "$@" 2>/dev/null; }
# Wait for the user before redrawing the menu.
pause() { read -p " ↩ "; }

case $c in
  1) k cluster-info || echo " ⚠️ kubectl not configured"; pause;;
  2) k get nodes -o wide; pause;;
  3) k get pods -A; pause;;
  4) k get svc -A; pause;;
  5) k get ingress -A; pause;;
  6) k get deployments -A; pause;;
  7) k get pvc -A; pause;;
  0) exec ./menu.sh;;
  *) echo " ❌"; sleep 1;;
esac
exec ./k3s.sh
45
scripts/pi-fleet.sh
Normal file
45
scripts/pi-fleet.sh
Normal file
@@ -0,0 +1,45 @@
|
|||||||
|
#!/bin/bash
# Interactive Raspberry Pi fleet menu: status, temperatures, and disk
# usage across the known nodes, then re-exec for the next round.

# Known-IP nodes (name:ip). CONSISTENCY FIX: this list was duplicated
# verbatim in three case arms; define it once.
FLEET=(
  "alice:192.168.4.49"
  "aria:192.168.4.64"
  "octavia:192.168.4.74"
  "lucidia:192.168.4.38"
)

# Run a command on one Pi. ROBUSTNESS FIX: the per-node arms (2-5) had no
# ConnectTimeout, so one offline Pi could hang the menu for minutes; use
# the same 2s timeout the fleet loops already used.
fleet_ssh() { ssh -o ConnectTimeout=2 pi@"$1" "$2" 2>/dev/null; }

# Show hostname/uptime/temperature for one node, then pause.
node_info() {
  local label=$1 ip=$2
  echo " 🖥️ $label"
  fleet_ssh "$ip" "hostname; uptime; vcgencmd measure_temp" || echo " ⚠️ Offline"
  read -p " ↩ "
}

clear
cat <<'MENU'

 🍓🍓🍓 PI FLEET 🍓🍓🍓

 📊 1 Fleet Status (ping all)
 🖥️ 2 alice Pi 400 192.168.4.49
 🖥️ 3 aria Pi 5 EC 192.168.4.64
 🖥️ 4 octavia Pi 5 PM+H8 192.168.4.74
 🖥️ 5 lucidia Pi 5 EC 192.168.4.38
 🖥️ 6 anastasia Pi 5 PM+H8 ---.---.-.--
 🖥️ 7 olympia Pi 4B PiKVM ---.---.-.--
 🌡️ 8 Fleet Temps
 💾 9 Fleet Disk Usage
 🔙 0 ← Main Menu

MENU
read -p " ⌨️ > " c
case $c in
  1) echo " 📊 Pinging fleet..."
     for h in "${FLEET[@]}"; do
       name="${h%%:*}"; ip="${h##*:}"
       ping -c1 -W1 "$ip" &>/dev/null && echo " ✅ $name ($ip)" || echo " ❌ $name ($ip)"
     done; read -p " ↩ ";;
  2) node_info "alice — Pi 400 — Gateway/DNS" 192.168.4.49;;
  3) node_info "aria — Pi 5 ElectroCookie" 192.168.4.64;;
  4) node_info "octavia — Pi 5 Pironman+Hailo-8" 192.168.4.74;;
  5) node_info "lucidia — Pi 5 ElectroCookie" 192.168.4.38;;
  6) echo " ⚠️ anastasia IP TBD"; read -p " ↩ ";;
  7) echo " ⚠️ olympia (PiKVM) IP TBD"; read -p " ↩ ";;
  8) echo " 🌡️ Fleet Temperatures:"
     for h in "${FLEET[@]}"; do
       name="${h%%:*}"; ip="${h##*:}"
       t=$(fleet_ssh "$ip" "vcgencmd measure_temp") && echo " $name: $t" || echo " $name: ⚠️ offline"
     done; read -p " ↩ ";;
  9) echo " 💾 Fleet Disk:"
     for h in "${FLEET[@]}"; do
       name="${h%%:*}"; ip="${h##*:}"
       d=$(fleet_ssh "$ip" "df -h / | tail -1 | awk '{print \$3\"/\"\$2\" (\"\$5\")\"}'") && echo " $name: $d" || echo " $name: ⚠️ offline"
     done; read -p " ↩ ";;
  0) exec ./menu.sh;;
  *) echo " ❌"; sleep 1;;
esac
# NOTE(review): committed as scripts/pi-fleet.sh but re-execs ./pifleet.sh
# — confirm the intended on-disk filename.
exec ./pifleet.sh
91
scripts/setup-dns.sh
Normal file
91
scripts/setup-dns.sh
Normal file
@@ -0,0 +1,91 @@
|
|||||||
|
#!/bin/bash
# Cloudflare DNS Setup for blackroad.io → Railway
# Run this AFTER Railway services are deployed.
#
# Required environment:
#   CF_TOKEN - Cloudflare API token with DNS:Edit permission for the zone
#   CF_ZONE  - Cloudflare zone ID for blackroad.io
#
# NOTE(review): the previously hardcoded zone value matched the Cloudflare
# *account* ID from the companion script -- confirm the real zone ID for
# blackroad.io before running.

set -euo pipefail

# SECURITY: credentials must come from the environment. The original script
# committed a live API token in plain text; that token should be rotated.
: "${CF_TOKEN:?Set CF_TOKEN to a Cloudflare API token with DNS edit rights}"
: "${CF_ZONE:?Set CF_ZONE to the blackroad.io zone ID}"

echo "☁️ Setting up Cloudflare DNS for blackroad.io..."

# create_dns_record NAME TARGET [PROXIED]
# Creates or updates the CNAME record NAME.blackroad.io → TARGET.
# PROXIED defaults to "true" (orange-cloud through Cloudflare, ttl=1 = auto).
create_dns_record() {
  local name="$1"
  local target="$2"
  local proxy="${3:-true}"
  local existing record_id payload

  echo " 📝 Creating CNAME: ${name}.blackroad.io → $target"

  # Look up an existing record so we update in place instead of duplicating.
  existing=$(curl -s -X GET "https://api.cloudflare.com/client/v4/zones/${CF_ZONE}/dns_records?name=${name}.blackroad.io" \
    -H "Authorization: Bearer ${CF_TOKEN}" \
    -H "Content-Type: application/json")
  record_id=$(echo "$existing" | jq -r '.result[0].id // empty')

  # Build the JSON body once with jq so names/targets containing special
  # characters are escaped correctly (the original hand-built it twice).
  payload=$(jq -n --arg name "$name" --arg content "$target" --argjson proxied "$proxy" \
    '{type: "CNAME", name: $name, content: $content, proxied: $proxied, ttl: 1}')

  if [ -n "$record_id" ]; then
    # Update existing record
    curl -s -X PUT "https://api.cloudflare.com/client/v4/zones/${CF_ZONE}/dns_records/${record_id}" \
      -H "Authorization: Bearer ${CF_TOKEN}" \
      -H "Content-Type: application/json" \
      -d "$payload" | jq -r '.success'
  else
    # Create new record
    curl -s -X POST "https://api.cloudflare.com/client/v4/zones/${CF_ZONE}/dns_records" \
      -H "Authorization: Bearer ${CF_TOKEN}" \
      -H "Content-Type: application/json" \
      -d "$payload" | jq -r '.success'
  fi
}

echo ""
echo "⚠️ IMPORTANT: Get your Railway domains first!"
echo ""
echo "Go to Railway dashboard and copy the domains for each service:"
echo " web service → something like: web-production.up.railway.app"
echo " api service → something like: api-production.up.railway.app"
echo ""
# -r: keep read from interpreting backslashes in pasted domains.
read -r -p "Enter the Railway domain for WEB (e.g., web-production.up.railway.app): " WEB_DOMAIN
read -r -p "Enter the Railway domain for API (e.g., api-production.up.railway.app): " API_DOMAIN

echo ""
echo "🚀 Creating DNS records..."

# Create www subdomain
create_dns_record "www" "$WEB_DOMAIN" "true"

# Create root domain
create_dns_record "@" "$WEB_DOMAIN" "true"

# Create api subdomain
create_dns_record "api" "$API_DOMAIN" "true"

# Optional: app subdomain (if you want app.blackroad.io)
# create_dns_record "app" "$WEB_DOMAIN" "true"

echo ""
echo "✅ DNS records created successfully!"
echo ""
echo "🔗 Your domains should be live shortly:"
echo " https://www.blackroad.io"
echo " https://blackroad.io"
echo " https://api.blackroad.io"
echo ""
echo "⏱ DNS propagation may take 5-10 minutes"
echo "🔒 SSL certificates will be auto-generated by Railway"
echo ""
echo "📋 Next: Test your endpoints!"
|
||||||
118
scripts/setup-tunnels.sh
Normal file
118
scripts/setup-tunnels.sh
Normal file
@@ -0,0 +1,118 @@
|
|||||||
|
#!/bin/bash
# BlackRoad Infrastructure - Cloudflare Tunnel Setup
# Sets up tunnels for: alice (Pi), shellfish (DigitalOcean), lucidia (Pi)
#
# Generates /tmp/blackroad-tunnel-config.yml and prints the manual steps
# needed to install, configure, and start cloudflared on each host.

set -euo pipefail

echo "🌐 BlackRoad Cloudflare Tunnel Setup"
echo "======================================"
echo ""

# Host configuration: name → LAN/public IP
declare -A HOSTS
HOSTS[alice]="192.168.4.49"
HOSTS[shellfish]="174.138.44.45"
HOSTS[lucidia]="192.168.4.38"

echo "📋 Configured Hosts:"
for host in "${!HOSTS[@]}"; do
  echo " • $host: ${HOSTS[$host]}"
done
echo ""

# Check if wrangler is authenticated
# NOTE(review): the rest of this flow uses cloudflared, not wrangler --
# confirm whether `cloudflared tunnel login` is the intended auth check here.
echo "🔐 Checking Cloudflare authentication..."
if ! wrangler whoami &>/dev/null; then
  echo "❌ Not authenticated with Cloudflare. Run: wrangler login"
  exit 1
fi
echo "✅ Authenticated"
echo ""

# Where cloudflared finds the tunnel credentials. The original hardcoded
# /Users/alexa/.cloudflared/... which exists only on one macOS machine;
# default to the invoking user's home and allow an env override.
CRED_FILE="${CLOUDFLARED_CRED_FILE:-$HOME/.cloudflared/blackroad-infrastructure.json}"

# Create tunnel configuration
echo "📝 Creating tunnel configuration..."

# NOTE(review): this config is later copied verbatim to the remote hosts,
# where the credentials land under /home/pi or /root (see step 3 below) --
# adjust credentials-file per host (or standardize on /etc/cloudflared)
# before deploying.
cat > /tmp/blackroad-tunnel-config.yml <<EOF
# BlackRoad Infrastructure Tunnels
tunnel: blackroad-infrastructure
credentials-file: ${CRED_FILE}

ingress:
  # Alice - Raspberry Pi (192.168.4.49)
  - hostname: alice.blackroad.io
    service: http://192.168.4.49:80
  - hostname: alice-api.blackroad.io
    service: http://192.168.4.49:3000

  # Shellfish - DigitalOcean (174.138.44.45)
  - hostname: shellfish.blackroad.io
    service: http://174.138.44.45:80
  - hostname: api.blackroad.io
    service: http://174.138.44.45:3000

  # Lucidia - Raspberry Pi (192.168.4.38)
  - hostname: lucidia.blackroad.io
    service: http://192.168.4.38:80
  - hostname: lucidia-api.blackroad.io
    service: http://192.168.4.38:3000

  # Catch-all
  - service: http_status:404
EOF

echo "✅ Tunnel configuration created"
echo ""

# Instructions for tunnel setup (quoted delimiter: printed literally)
cat <<'INSTRUCTIONS'
🚀 Next Steps:
==============

1. Install cloudflared on each host:

   Alice & Lucidia (Raspberry Pi):
   $ ssh alice
   $ wget https://github.com/cloudflare/cloudflared/releases/latest/download/cloudflared-linux-arm64
   $ sudo mv cloudflared-linux-arm64 /usr/local/bin/cloudflared
   $ sudo chmod +x /usr/local/bin/cloudflared

   Shellfish (DigitalOcean):
   $ ssh shellfish
   $ wget https://github.com/cloudflare/cloudflared/releases/latest/download/cloudflared-linux-amd64
   $ sudo mv cloudflared-linux-amd64 /usr/local/bin/cloudflared
   $ sudo chmod +x /usr/local/bin/cloudflared

2. Create tunnel (run locally):
   $ cloudflared tunnel create blackroad-infrastructure

3. Copy credentials to each host:
   $ scp ~/.cloudflared/blackroad-infrastructure.json alice:/home/pi/.cloudflared/
   $ scp ~/.cloudflared/blackroad-infrastructure.json shellfish:/root/.cloudflared/
   $ scp ~/.cloudflared/blackroad-infrastructure.json lucidia:/home/pi/.cloudflared/

4. Copy config to each host:
   $ scp /tmp/blackroad-tunnel-config.yml alice:/home/pi/.cloudflared/config.yml
   $ scp /tmp/blackroad-tunnel-config.yml shellfish:/root/.cloudflared/config.yml
   $ scp /tmp/blackroad-tunnel-config.yml lucidia:/home/pi/.cloudflared/config.yml

5. Configure DNS (run locally):
   $ cloudflared tunnel route dns blackroad-infrastructure alice.blackroad.io
   $ cloudflared tunnel route dns blackroad-infrastructure shellfish.blackroad.io
   $ cloudflared tunnel route dns blackroad-infrastructure lucidia.blackroad.io
   $ cloudflared tunnel route dns blackroad-infrastructure api.blackroad.io

6. Start tunnel as service on each host:
   $ ssh alice "sudo cloudflared service install && sudo systemctl start cloudflared"
   $ ssh shellfish "sudo cloudflared service install && sudo systemctl start cloudflared"
   $ ssh lucidia "sudo cloudflared service install && sudo systemctl start cloudflared"

📊 Host Summary:
- Alice: 192.168.4.49 (Raspberry Pi) → alice.blackroad.io
- Shellfish: 174.138.44.45 (DigitalOcean) → shellfish.blackroad.io, api.blackroad.io
- Lucidia: 192.168.4.38 (Raspberry Pi) → lucidia.blackroad.io

INSTRUCTIONS

echo ""
echo "✅ Setup script complete!"
echo " Config saved to: /tmp/blackroad-tunnel-config.yml"
|
||||||
27
scripts/tailscale-mesh.sh
Normal file
27
scripts/tailscale-mesh.sh
Normal file
@@ -0,0 +1,27 @@
|
|||||||
|
#!/bin/bash
# Interactive Tailscale control menu (status / peers / ping / up / down).
# Re-execs itself after each action; option 0 returns to the main menu.
clear
cat <<'MENU'

 🔗🔗🔗 TAILSCALE 🔗🔗🔗

 📊 1 Status
 📋 2 Peer List
 🏓 3 Ping Peer
 🌐 4 IP Info
 🔌 5 Connect / Up
 ⏹️ 6 Disconnect / Down
 🔙 0 ← Main Menu

MENU
# -r: keep read from interpreting backslashes in the user's input.
read -r -p " ⌨️ > " c
# Errors are intentionally silenced (2>/dev/null): this is a best-effort
# menu that reports "not running"/"Failed" instead of raw CLI errors.
case "$c" in
  1) tailscale status 2>/dev/null || echo " ⚠️ Tailscale not running"; read -r -p " ↩ ";;
  2) tailscale status --peers 2>/dev/null; read -r -p " ↩ ";;
  3) read -r -p " 🏓 Peer name/IP: " p; tailscale ping "$p" 2>/dev/null; read -r -p " ↩ ";;
  4) tailscale ip -4 2>/dev/null; tailscale ip -6 2>/dev/null; read -r -p " ↩ ";;
  5) sudo tailscale up 2>/dev/null && echo " ✅ Connected" || echo " ❌ Failed"; read -r -p " ↩ ";;
  6) sudo tailscale down 2>/dev/null && echo " ⏹️ Disconnected"; read -r -p " ↩ ";;
  0) exec ./menu.sh;;
  *) echo " ❌"; sleep 1;;
esac
exec ./tailscale.sh
|
||||||
Reference in New Issue
Block a user