Add 12 Python utilities and 4 shell scripts

Python: core engine, command center, executor, persistence layer,
terminal OS, multi-language LLM templates, response templates,
codex scraper/symbolic/verification, autonomous pipeline, swarm
intelligence

Scripts: RoadChain format converter, network interceptor, pixel
metaverse engine, Clerk auth integration

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
Alexa Amundson
2026-02-20 20:28:37 -06:00
parent 4722b2bf91
commit 3e155af71c
16 changed files with 10491 additions and 0 deletions

587
scripts/clerk-auth.sh Normal file
View File

@@ -0,0 +1,587 @@
#!/bin/bash
# 🔐 Integrate Clerk Authentication Across All BlackRoad Products
# Adds enterprise-grade authentication to all 24 products
#
# This opening section prints the run banner, declares the full product
# list, and zeroes the counters used by the end-of-run summary.
printf '%s\n' "🔐 BlackRoad Clerk Authentication Integration"
printf '%s\n' "========================================="
printf '\n'
# All 24 BlackRoad products that receive a clerk-integration/ directory.
PRODUCTS=(
  "roadauth" "roadapi" "roadbilling"
  "blackroad-ai-platform" "blackroad-langchain-studio" "roadsupport"
  "blackroad-admin-portal" "blackroad-meet" "blackroad-minio"
  "blackroad-docs-site" "blackroad-vllm" "blackroad-keycloak"
  "roadlog-monitoring" "roadvpn" "blackroad-localai"
  "roadnote" "roadscreen" "genesis-road"
  "roadgateway" "roadmobile" "roadcli"
  "roadauth-pro" "roadstudio" "roadmarket"
)
# Counters reported by the summary at the bottom of the script.
integrated_count=0
skipped_count=0
printf '%s\n' "📝 Clerk Configuration Setup"
printf '%s\n' "========================================="
printf '\n'
# Create the shared Clerk configuration template under $HOME.
# FIX: the heredoc delimiter is now quoted ('EOF') so the template is written
# verbatim; previously the unquoted delimiter would have shell-expanded any
# `$` or backtick sequence later added to the JSON (e.g. a real secret key
# containing `$`), silently corrupting the file.
cat > "$HOME/clerk-config.json" <<'EOF'
{
"clerk": {
"publishableKey": "pk_test_YOUR_PUBLISHABLE_KEY",
"secretKey": "sk_test_YOUR_SECRET_KEY",
"frontendApi": "clerk.YOUR_DOMAIN.com",
"features": {
"socialLogin": ["google", "github", "apple"],
"mfa": true,
"passwordless": true,
"organizations": true
},
"appearance": {
"theme": "dark",
"variables": {
"colorPrimary": "#F5A623",
"colorDanger": "#FF1D6C",
"colorSuccess": "#2ECC71",
"colorWarning": "#F5A623",
"fontFamily": "SF Pro Display, -apple-system, sans-serif",
"borderRadius": "13px"
}
}
}
}
EOF
echo "✅ Clerk configuration template created: ~/clerk-config.json"
echo ""
# Create Clerk integration script for each product
# ─────────────────────────────────────────────────────────────────────────────
# Per-product loop: for every product checked out under $HOME, create a
# clerk-integration/ folder containing a sign-in page (clerk-auth.html), a
# route-protection script (clerk-protected.js) and a product README.
echo "========================================="
echo "🔧 Creating Clerk Integration Components"
echo "========================================="
echo ""
for product in "${PRODUCTS[@]}"; do
product_dir="$HOME/$product"
# Products not checked out locally are skipped and counted for the summary.
if [ ! -d "$product_dir" ]; then
echo "⚠️ $product - Directory not found, SKIPPING"
((skipped_count++))
continue
fi
echo "📦 Integrating: $product"
# Create clerk-integration directory
clerk_dir="$product_dir/clerk-integration"
mkdir -p "$clerk_dir"
# Create Clerk HTML components
# Quoted delimiter ('CLERK_HTML'): the page below is written verbatim —
# no shell expansion happens inside this heredoc.
cat > "$clerk_dir/clerk-auth.html" <<'CLERK_HTML'
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Sign In - BlackRoad</title>
<style>
* { margin: 0; padding: 0; box-sizing: border-box; }
body {
font-family: 'SF Pro Display', -apple-system, sans-serif;
background: #000;
color: #FFF;
min-height: 100vh;
display: flex;
justify-content: center;
align-items: center;
}
.auth-container {
width: 100%;
max-width: 450px;
padding: 34px;
}
.auth-header {
text-align: center;
margin-bottom: 34px;
}
.auth-logo {
font-size: 55px;
margin-bottom: 21px;
}
.auth-title {
font-size: 34px;
font-weight: 700;
background: linear-gradient(135deg, #F5A623 38.2%, #2979FF 61.8%);
-webkit-background-clip: text;
-webkit-text-fill-color: transparent;
margin-bottom: 13px;
}
.auth-subtitle {
font-size: 16px;
opacity: 0.7;
}
#clerk-auth {
background: rgba(255,255,255,0.03);
border: 1px solid rgba(255,255,255,0.1);
border-radius: 13px;
padding: 34px;
}
.loading {
text-align: center;
padding: 55px;
opacity: 0.7;
}
</style>
</head>
<body>
<div class="auth-container">
<div class="auth-header">
<div class="auth-logo">🖤</div>
<h1 class="auth-title">Welcome to BlackRoad</h1>
<p class="auth-subtitle">Sign in to continue</p>
</div>
<div id="clerk-auth">
<div class="loading">Loading authentication...</div>
</div>
</div>
<!-- Clerk SDK -->
<script
async
crossorigin="anonymous"
data-clerk-publishable-key="pk_test_YOUR_PUBLISHABLE_KEY"
src="https://YOUR_FRONTEND_API/npm/@clerk/clerk-js@latest/dist/clerk.browser.js"
type="text/javascript"
></script>
<script>
window.addEventListener('load', async () => {
await Clerk.load();
const signInDiv = document.getElementById('clerk-auth');
// Mount Clerk sign-in component
Clerk.mountSignIn(signInDiv, {
appearance: {
elements: {
rootBox: 'w-full',
card: 'bg-transparent border-none shadow-none'
},
variables: {
colorPrimary: '#F5A623',
colorText: '#FFFFFF',
colorBackground: '#000000',
fontFamily: 'SF Pro Display, -apple-system, sans-serif'
}
},
afterSignInUrl: '/dashboard',
signUpUrl: '/sign-up'
});
// Check if user is already signed in
if (Clerk.user) {
window.location.href = '/dashboard';
}
});
</script>
</body>
</html>
CLERK_HTML
# Create protected route wrapper
# Also a quoted heredoc: the JS below is written verbatim.
cat > "$clerk_dir/clerk-protected.js" <<'CLERK_JS'
// 🔐 Clerk Protected Route Wrapper
// Add this to any page that requires authentication
(function() {
// Wait for Clerk to load
window.addEventListener('load', async () => {
// Check if Clerk is loaded
if (typeof Clerk === 'undefined') {
console.error('Clerk not loaded');
return;
}
await Clerk.load();
// Check if user is authenticated
if (!Clerk.user) {
// Redirect to sign-in
window.location.href = '/clerk-integration/clerk-auth.html';
return;
}
// User is authenticated
console.log('✅ User authenticated:', Clerk.user.fullName);
// Add user info to page
addUserInfo(Clerk.user);
// Add sign-out button
addSignOutButton();
});
function addUserInfo(user) {
const userInfoDiv = document.createElement('div');
userInfoDiv.id = 'clerk-user-info';
userInfoDiv.style.cssText = `
position: fixed;
top: 21px;
right: 21px;
background: rgba(255,255,255,0.05);
border: 1px solid rgba(255,255,255,0.1);
border-radius: 13px;
padding: 13px 21px;
display: flex;
align-items: center;
gap: 13px;
z-index: 1000;
`;
userInfoDiv.innerHTML = `
<img src="${user.profileImageUrl}" alt="${user.fullName}"
style="width: 34px; height: 34px; border-radius: 50%;">
<div>
<div style="font-weight: 600; font-size: 14px;">${user.fullName || user.username}</div>
<div style="font-size: 12px; opacity: 0.7;">${user.primaryEmailAddress.emailAddress}</div>
</div>
`;
document.body.appendChild(userInfoDiv);
}
function addSignOutButton() {
const signOutBtn = document.createElement('button');
signOutBtn.textContent = 'Sign Out';
signOutBtn.style.cssText = `
position: fixed;
top: 89px;
right: 21px;
background: linear-gradient(135deg, #F5A623 38.2%, #FF1D6C 61.8%);
color: white;
border: none;
border-radius: 8px;
padding: 8px 21px;
font-size: 14px;
font-weight: 600;
cursor: pointer;
z-index: 1000;
`;
signOutBtn.addEventListener('click', async () => {
await Clerk.signOut();
window.location.href = '/clerk-integration/clerk-auth.html';
});
document.body.appendChild(signOutBtn);
}
})();
CLERK_JS
# Create integration README
# NOTE: this heredoc delimiter is intentionally UNQUOTED so that $product
# expands into the README; literal backticks in the markdown are escaped
# as \` for the same reason.
cat > "$clerk_dir/README.md" <<EOF
# Clerk Authentication Integration for $product
## Setup Instructions
### 1. Get Clerk API Keys
1. Sign up at [clerk.com](https://clerk.com)
2. Create a new application
3. Get your publishable key (pk_test_...) and secret key (sk_test_...)
4. Update the keys in:
- \`clerk-auth.html\` (line 66)
- Main \`index.html\` (add Clerk SDK)
### 2. Update Main HTML File
Add Clerk SDK to \`index.html\` before closing \`</body>\`:
\`\`\`html
<!-- Clerk SDK -->
<script
async
crossorigin="anonymous"
data-clerk-publishable-key="pk_test_YOUR_KEY"
src="https://YOUR_FRONTEND_API/npm/@clerk/clerk-js@latest/dist/clerk.browser.js"
></script>
<!-- Clerk Protected Route -->
<script src="./clerk-integration/clerk-protected.js"></script>
\`\`\`
### 3. Configure Clerk Dashboard
1. **Allowed Origins**: Add your domain(s)
- http://localhost:*
- https://YOUR_CLOUDFLARE_PAGES.pages.dev
- https://YOUR_CUSTOM_DOMAIN.com
2. **Social Login** (optional):
- Enable Google, GitHub, Apple
- Configure OAuth apps
3. **Appearance**:
- Theme: Dark
- Primary color: #F5A623
### 4. Deploy
\`\`\`bash
# Update Clerk keys in files
# Deploy to Cloudflare Pages
wrangler pages deploy .
\`\`\`
### 5. Test
1. Visit your site
2. You'll be redirected to sign-in
3. Create account or sign in
4. Access protected content
## Features Enabled
✅ Email/password authentication
✅ Social login (Google, GitHub, Apple)
✅ Multi-factor authentication (MFA)
✅ Passwordless sign-in
✅ User profile management
✅ Session management
✅ Organization support (teams)
## Files Created
- \`clerk-auth.html\` - Sign-in/sign-up page
- \`clerk-protected.js\` - Route protection script
- \`README.md\` - This file
## API Usage
\`\`\`javascript
// Get current user
const user = Clerk.user;
// Sign out
await Clerk.signOut();
// Check authentication
if (Clerk.user) {
console.log('Authenticated');
}
\`\`\`
🖤🛣️ Secure authentication powered by Clerk
EOF
((integrated_count++))
echo " ✅ Clerk integration created: $clerk_dir"
echo ""
done
# Create master Clerk setup guide
# ─────────────────────────────────────────────────────────────────────────────
# One repo-wide guide written to $HOME. Quoted delimiter ('GUIDE'): the
# markdown is emitted verbatim — unescaped backticks/fences are safe here.
echo "========================================="
echo "📚 Creating Master Setup Guide"
echo "========================================="
echo ""
cat > "$HOME/CLERK_INTEGRATION_GUIDE.md" <<'GUIDE'
# 🔐 BlackRoad Clerk Authentication - Master Guide
Complete guide for integrating Clerk authentication across all 24 BlackRoad products.
## Overview
Clerk provides enterprise-grade authentication with:
- **Email/Password**: Traditional authentication
- **Social Login**: Google, GitHub, Apple, Microsoft
- **Passwordless**: Magic links, SMS codes
- **Multi-Factor**: TOTP, SMS, email codes
- **Organizations**: Team/workspace support
- **Session Management**: Secure JWT tokens
## Quick Start
### 1. Create Clerk Account
```bash
# Visit Clerk
open https://clerk.com
# Create account and application
# Get API keys from dashboard
```
### 2. Update Configuration
Edit `~/clerk-config.json`:
```json
{
"publishableKey": "pk_test_YOUR_KEY",
"secretKey": "sk_test_YOUR_SECRET",
"frontendApi": "clerk.YOUR_APP.com"
}
```
### 3. Integrate Products
Each product in `~/[product]/clerk-integration/` contains:
- `clerk-auth.html` - Sign-in page
- `clerk-protected.js` - Route protection
- `README.md` - Product-specific setup
### 4. Deploy
```bash
# For each product:
cd ~/[product]
# Update Clerk keys in clerk-integration files
# Deploy to Cloudflare
wrangler pages deploy .
```
## Product-Specific Integration
### RoadAuth Pro
- Already has auth system, enhance with Clerk
- Enable SSO integration
- Add MFA enforcement
### RoadMarket
- Protect seller dashboard
- User verification for payments
- Organization support for teams
### RoadStudio
- Protect video projects
- Team collaboration features
- Cloud storage authentication
### BlackRoad AI Platform
- API key management via Clerk
- Usage tracking per user
- Organization billing
## Advanced Features
### Custom Branding
```javascript
Clerk.load({
appearance: {
variables: {
colorPrimary: '#F5A623',
colorBackground: '#000000',
colorText: '#FFFFFF',
fontFamily: 'SF Pro Display'
}
}
});
```
### Organization Support
```javascript
// Create organization
await Clerk.createOrganization({
name: "BlackRoad Team"
});
// Switch organization
await Clerk.setActiveOrganization(orgId);
```
### Webhooks
Configure webhooks in Clerk dashboard:
- `user.created` - Send welcome email
- `session.created` - Track logins
- `organization.membership.created` - Team notifications
## Security Best Practices
1. **Never commit API keys** - Use environment variables
2. **Enable MFA** for admin accounts
3. **Use organizations** for team features
4. **Implement RBAC** with Clerk metadata
5. **Monitor sessions** via Clerk dashboard
## Pricing
- **Free**: 10,000 MAUs
- **Pro**: $25/mo + $0.02/MAU
- **Enterprise**: Custom pricing
## Resources
- [Clerk Docs](https://clerk.com/docs)
- [React Integration](https://clerk.com/docs/quickstarts/react)
- [API Reference](https://clerk.com/docs/reference/backend-api)
- [Community](https://clerk.com/discord)
## Support
Issues? Contact:
- Clerk: support@clerk.com
- BlackRoad: blackroad.systems@gmail.com
🖤🛣️ Secure. Simple. Scalable.
GUIDE
echo "✅ Master guide created: ~/CLERK_INTEGRATION_GUIDE.md"
echo ""
# Summary
# ─────────────────────────────────────────────────────────────────────────────
# Report the run counters, the files created, and the manual follow-up steps.
# Unquoted delimiter on purpose: $integrated_count / $skipped_count expand.
cat <<SUMMARY
=========================================
📊 Integration Summary
=========================================

✅ Products integrated: $integrated_count
⏭️ Products skipped: $skipped_count

📦 Created Files:
 - ~/clerk-config.json (Clerk configuration)
 - ~/CLERK_INTEGRATION_GUIDE.md (Master guide)
 - ~/[product]/clerk-integration/ (Integration files for each product)

🚀 Next Steps:

1. Create Clerk account:
 https://clerk.com

2. Get API keys from Clerk dashboard

3. Update keys in:
 - ~/clerk-config.json
 - Each product's clerk-integration files

4. Configure Clerk dashboard:
 - Add allowed origins (Cloudflare Pages domains)
 - Enable social providers (Google, GitHub, Apple)
 - Set up appearance (dark theme, BlackRoad colors)

5. Deploy products with Clerk integration:
 cd ~/[product] && wrangler pages deploy .

📚 Read the master guide:
 cat ~/CLERK_INTEGRATION_GUIDE.md

🖤🛣️ Enterprise Authentication Ready!
SUMMARY

654
scripts/network-interceptor.sh Executable file
View File

@@ -0,0 +1,654 @@
#!/bin/bash
# BlackRoad Network Interceptor
# If nginx blocks, sites redirect, or search is blocked → Route to BlackRoad
#
# Generates (but does not itself activate) hosts/nginx/search interception
# configs under ~/.blackroad/network, plus a local "BlackRoad Windows" page.
set -e
# ANSI 256-color escape sequences used by every output helper below.
PINK='\033[38;5;205m'
AMBER='\033[38;5;214m'
BLUE='\033[38;5;69m'
GREEN='\033[38;5;82m'
RED='\033[38;5;196m'
RESET='\033[0m'
# System hosts file (only ever read, or appended via printed sudo command).
HOSTS_FILE="/etc/hosts"
# Generated artifacts live under ~/.blackroad/network.
BLACKROAD_HOSTS="$HOME/.blackroad/network/hosts.blackroad"
NGINX_INTERCEPT="$HOME/.blackroad/network/nginx-intercept.conf"
SEARCH_REDIRECT="$HOME/.blackroad/network/search-redirect.json"
# Ensure the output directory exists before any command writes into it.
mkdir -p "$HOME/.blackroad/network"
# Print the framed BlackRoad Network Interceptor banner plus a blank
# spacer line, using the color variables defined at the top of the file.
show_banner() {
  printf '%b\n' \
    "${PINK}╔═══════════════════════════════════════════════════╗${RESET}" \
    "${PINK}${RESET} BlackRoad Network Interceptor ${PINK}${RESET}" \
    "${PINK}╚═══════════════════════════════════════════════════╝${RESET}"
  printf '\n'
}
# Write the hosts-file interception mappings to $BLACKROAD_HOSTS and print
# manual activation instructions. Does NOT modify /etc/hosts itself — that
# requires the sudo command echoed at the end.
setup_hosts_intercept() {
show_banner
echo -e "${BLUE}═══ HOSTS FILE INTERCEPTION ═══${RESET}\n"
# Create BlackRoad hosts mappings
# Quoted delimiter ('EOF'): the mappings below are written verbatim.
cat > "$BLACKROAD_HOSTS" << 'EOF'
# BlackRoad Network Interception
# Any blocked/forbidden domain → Route to BlackRoad
# Search engines (if blocked, route to BlackRoad Windows)
127.0.0.1 google.com
127.0.0.1 www.google.com
127.0.0.1 bing.com
127.0.0.1 www.bing.com
127.0.0.1 duckduckgo.com
127.0.0.1 www.duckduckgo.com
# AI services (if blocked, route to BlackRoad AI)
127.0.0.1 chat.openai.com
127.0.0.1 claude.ai
127.0.0.1 copilot.github.com
# Commonly blocked sites → BlackRoad proxy
127.0.0.1 blocked-site-1.com
127.0.0.1 blocked-site-2.com
# Corporate blocks → BlackRoad bypass
127.0.0.1 admin-blocked.local
127.0.0.1 forbidden.local
# Redirect captures → BlackRoad
127.0.0.1 redirect-intercept.local
EOF
echo -e "${GREEN}✓ Created BlackRoad hosts file${RESET}"
echo -e " Location: $BLACKROAD_HOSTS"
echo ""
echo -e "${AMBER}To activate (requires sudo):${RESET}"
echo " sudo bash -c 'cat $BLACKROAD_HOSTS >> $HOSTS_FILE'"
echo ""
echo -e "${BLUE}Effect:${RESET}"
echo " • Blocked domains route to localhost (BlackRoad)"
echo " • Search engines → BlackRoad Windows"
echo " • AI services → BlackRoad AI"
echo " • Admin blocks → BlackRoad bypass"
}
# Write the nginx interception snippet to $NGINX_INTERCEPT and print manual
# activation instructions (copy into sites-available, enable, reload).
# Quoted delimiter ('EOF'): nginx's own $variables ($host, $request_uri, …)
# must reach the config file literally, not be expanded by the shell.
setup_nginx_intercept() {
show_banner
echo -e "${BLUE}═══ NGINX INTERCEPTION ═══${RESET}\n"
cat > "$NGINX_INTERCEPT" << 'EOF'
# BlackRoad Nginx Interception Configuration
# Catch 403 Forbidden, 401 Unauthorized, redirects → Route to BlackRoad
# Error page interception
error_page 403 /blackroad-bypass;
error_page 401 /blackroad-auth;
error_page 404 /blackroad-search;
error_page 502 /blackroad-failover;
error_page 503 /blackroad-failover;
# BlackRoad bypass endpoint
location = /blackroad-bypass {
internal;
default_type text/html;
return 200 '
<!DOCTYPE html>
<html>
<head>
<title>BlackRoad Bypass</title>
<style>
body {
background: #0a0a0a;
color: #ff1d6c;
font-family: monospace;
display: flex;
justify-content: center;
align-items: center;
height: 100vh;
margin: 0;
}
.container {
text-align: center;
border: 2px solid #ff1d6c;
padding: 40px;
border-radius: 10px;
}
h1 { font-size: 48px; margin: 0; }
p { font-size: 20px; color: #f5a623; }
.btn {
background: #ff1d6c;
color: #0a0a0a;
padding: 15px 30px;
border: none;
border-radius: 5px;
font-size: 18px;
cursor: pointer;
margin: 10px;
font-family: monospace;
}
.btn:hover { background: #2979ff; color: white; }
</style>
</head>
<body>
<div class="container">
<h1>⚡ BlackRoad Bypass Active</h1>
<p>This content was blocked. Routing through BlackRoad...</p>
<br>
<button class="btn" onclick="window.location.href=\"http://localhost:3000/search\"">
Open BlackRoad Windows
</button>
<button class="btn" onclick="window.location.href=\"http://localhost:3001/\"">
Access Via Proxy
</button>
<button class="btn" onclick="history.back()">
Go Back
</button>
<br><br>
<p style="font-size: 14px; color: #9c27b0;">
Admin blocked this? We route around it. 😎
</p>
</div>
</body>
</html>
';
}
# BlackRoad search (if search is blocked)
location = /blackroad-search {
internal;
default_type text/html;
return 200 '
<!DOCTYPE html>
<html>
<head>
<title>BlackRoad Windows - Search</title>
<style>
body {
background: linear-gradient(135deg, #0a0a0a 0%, #1a1a1a 100%);
color: #ff1d6c;
font-family: monospace;
margin: 0;
padding: 20px;
}
.search-container {
max-width: 800px;
margin: 100px auto;
text-align: center;
}
h1 {
font-size: 72px;
margin: 0;
text-shadow: 0 0 20px #ff1d6c;
}
.search-box {
width: 100%;
padding: 20px;
font-size: 24px;
background: #1a1a1a;
border: 2px solid #ff1d6c;
border-radius: 50px;
color: #f5a623;
font-family: monospace;
margin: 30px 0;
}
.search-box:focus {
outline: none;
border-color: #2979ff;
box-shadow: 0 0 30px #2979ff;
}
.subtitle {
color: #f5a623;
font-size: 18px;
}
</style>
</head>
<body>
<div class="search-container">
<h1>BLACKROAD</h1>
<p class="subtitle">Windows - Unrestricted Search</p>
<input type="text" class="search-box"
placeholder="Search anything... No blocks, no limits"
autofocus
onkeypress="if(event.key===\'Enter\') window.location.href=\'http://localhost:3000/search?q=\'+this.value">
<p style="color: #9c27b0; font-size: 14px; margin-top: 50px;">
Search blocked? Not anymore. BlackRoad Windows is always open. 🚀
</p>
</div>
</body>
</html>
';
}
# Catch all redirects → BlackRoad
location @blackroad-redirect {
return 302 http://localhost:3000/redirect-intercepted?url=$request_uri;
}
# Main proxy for blocked content
location /blackroad-proxy/ {
# Strip /blackroad-proxy/ prefix and proxy to target
rewrite ^/blackroad-proxy/(.*) /$1 break;
# Proxy settings
proxy_pass http://localhost:3001;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-BlackRoad-Bypass "true";
}
EOF
echo -e "${GREEN}✓ Created Nginx interception config${RESET}"
echo -e " Location: $NGINX_INTERCEPT"
echo ""
echo -e "${AMBER}To activate:${RESET}"
echo " sudo cp $NGINX_INTERCEPT /etc/nginx/sites-available/blackroad-intercept"
echo " sudo ln -s /etc/nginx/sites-available/blackroad-intercept /etc/nginx/sites-enabled/"
echo " sudo nginx -t && sudo nginx -s reload"
echo ""
echo -e "${BLUE}Effect:${RESET}"
echo " • 403 Forbidden → BlackRoad bypass page"
echo " • 404 Not Found → BlackRoad search"
echo " • 502/503 Errors → BlackRoad failover"
echo " • Redirects → BlackRoad intercept"
}
# Write the search-bar → BlackRoad redirect mapping to $SEARCH_REDIRECT as
# JSON and print a summary. The JSON is consumed by other BlackRoad tooling,
# not by this script. Quoted delimiter ('EOF'): written verbatim.
setup_search_redirect() {
show_banner
echo -e "${BLUE}═══ SEARCH BAR INTERCEPTION ═══${RESET}\n"
cat > "$SEARCH_REDIRECT" << 'EOF'
{
"philosophy": "Search bar = BlackRoad Windows",
"mappings": {
"google.com": "http://localhost:3000/search",
"google.com/search": "http://localhost:3000/search",
"bing.com": "http://localhost:3000/search",
"duckduckgo.com": "http://localhost:3000/search",
"search": "http://localhost:3000/search",
"find": "http://localhost:3000/search"
},
"blocked_redirects": {
"admin-block": "http://localhost:3000/bypass",
"forbidden": "http://localhost:3000/bypass",
"access-denied": "http://localhost:3000/bypass",
"not-authorized": "http://localhost:3000/bypass"
},
"default": "http://localhost:3000/search"
}
EOF
echo -e "${GREEN}✓ Created search redirect config${RESET}"
echo -e " Location: $SEARCH_REDIRECT"
echo ""
echo -e "${BLUE}Effect:${RESET}"
echo " • Any search → BlackRoad Windows"
echo " • Blocked pages → BlackRoad bypass"
echo " • Redirects → BlackRoad intercept"
}
# Write the standalone "BlackRoad Windows" landing page (search box plus a
# grid of launcher tiles pointing at localhost:3000/3001 services) to
# ~/.blackroad/network/blackroad-windows.html and print how to open it.
# Quoted delimiter ('EOF'): the HTML/JS template literals below (e.g.
# ${encodeURIComponent(query)}) must NOT be expanded by the shell.
create_blackroad_windows() {
show_banner
echo -e "${BLUE}═══ BLACKROAD WINDOWS ═══${RESET}\n"
cat > "$HOME/.blackroad/network/blackroad-windows.html" << 'EOF'
<!DOCTYPE html>
<html>
<head>
<title>BlackRoad Windows - Unrestricted Access</title>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<style>
* { margin: 0; padding: 0; box-sizing: border-box; }
body {
background: linear-gradient(135deg, #0a0a0a 0%, #1a1a2e 100%);
color: #ff1d6c;
font-family: 'Courier New', monospace;
min-height: 100vh;
display: flex;
flex-direction: column;
}
.header {
padding: 20px;
text-align: center;
border-bottom: 2px solid #ff1d6c;
background: rgba(255, 29, 108, 0.1);
}
.header h1 {
font-size: 48px;
text-shadow: 0 0 20px #ff1d6c;
animation: glow 2s ease-in-out infinite;
}
@keyframes glow {
0%, 100% { text-shadow: 0 0 20px #ff1d6c; }
50% { text-shadow: 0 0 40px #ff1d6c, 0 0 60px #ff1d6c; }
}
.search-bar {
padding: 40px 20px;
display: flex;
justify-content: center;
gap: 10px;
}
.search-input {
width: 600px;
padding: 20px 30px;
font-size: 20px;
background: #1a1a2e;
border: 2px solid #ff1d6c;
border-radius: 50px;
color: #f5a623;
font-family: 'Courier New', monospace;
transition: all 0.3s;
}
.search-input:focus {
outline: none;
border-color: #2979ff;
box-shadow: 0 0 30px rgba(41, 121, 255, 0.5);
}
.search-btn {
padding: 20px 40px;
background: #ff1d6c;
border: none;
border-radius: 50px;
color: #0a0a0a;
font-size: 18px;
font-weight: bold;
cursor: pointer;
font-family: 'Courier New', monospace;
transition: all 0.3s;
}
.search-btn:hover {
background: #2979ff;
color: white;
box-shadow: 0 0 20px #2979ff;
}
.content {
flex: 1;
padding: 40px;
display: grid;
grid-template-columns: repeat(auto-fit, minmax(300px, 1fr));
gap: 20px;
max-width: 1400px;
margin: 0 auto;
width: 100%;
}
.window {
background: rgba(26, 26, 46, 0.8);
border: 2px solid #ff1d6c;
border-radius: 10px;
padding: 20px;
transition: all 0.3s;
}
.window:hover {
border-color: #2979ff;
box-shadow: 0 0 30px rgba(41, 121, 255, 0.3);
transform: translateY(-5px);
}
.window h3 {
color: #f5a623;
margin-bottom: 15px;
font-size: 24px;
}
.window p {
color: #9c27b0;
line-height: 1.6;
}
.window button {
margin-top: 15px;
padding: 10px 20px;
background: #ff1d6c;
border: none;
border-radius: 5px;
color: #0a0a0a;
font-family: 'Courier New', monospace;
cursor: pointer;
font-size: 14px;
transition: all 0.3s;
}
.window button:hover {
background: #2979ff;
color: white;
}
.footer {
padding: 20px;
text-align: center;
border-top: 2px solid #ff1d6c;
background: rgba(255, 29, 108, 0.1);
color: #9c27b0;
}
.status-indicator {
display: inline-block;
width: 12px;
height: 12px;
border-radius: 50%;
background: #00ff00;
margin-right: 10px;
animation: pulse 2s infinite;
}
@keyframes pulse {
0%, 100% { opacity: 1; }
50% { opacity: 0.5; }
}
</style>
</head>
<body>
<div class="header">
<h1>⚡ BLACKROAD WINDOWS ⚡</h1>
<p style="color: #f5a623; margin-top: 10px; font-size: 18px;">
Unrestricted Access - No Blocks, No Limits
</p>
</div>
<div class="search-bar">
<input type="text"
class="search-input"
id="searchInput"
placeholder="Search anything... No restrictions"
autofocus>
<button class="search-btn" onclick="search()">Search</button>
</div>
<div class="content">
<div class="window">
<h3>🤖 AI Assistants</h3>
<p>Access unlimited AI - Copilot, Claude, GPT, Ollama</p>
<button onclick="window.location.href='http://localhost:3001/ai'">Launch AI</button>
</div>
<div class="window">
<h3>🔍 Unrestricted Search</h3>
<p>Search without limits, blocks, or tracking</p>
<button onclick="search()">Start Search</button>
</div>
<div class="window">
<h3>🌐 Bypass Proxy</h3>
<p>Access blocked websites through BlackRoad proxy</p>
<button onclick="window.location.href='http://localhost:3001/proxy'">Open Proxy</button>
</div>
<div class="window">
<h3>🧠 Memory System</h3>
<p>4,000+ entries, full-text search, PS-SHA-∞</p>
<button onclick="window.location.href='http://localhost:3000/memory'">Access Memory</button>
</div>
<div class="window">
<h3>📝 Codex Search</h3>
<p>22,244 components indexed and searchable</p>
<button onclick="window.location.href='http://localhost:3000/codex'">Search Codex</button>
</div>
<div class="window">
<h3>🚀 Deploy Anywhere</h3>
<p>Zero-downtime deployments across infrastructure</p>
<button onclick="window.location.href='http://localhost:3000/deploy'">Deploy Now</button>
</div>
<div class="window">
<h3>🎨 Design System</h3>
<p>BlackRoad brand colors, components, templates</p>
<button onclick="window.location.href='http://localhost:3000/design'">View Design</button>
</div>
<div class="window">
<h3>⚡ Terminal</h3>
<p>Full tmux-like terminal with sessions</p>
<button onclick="window.open('http://localhost:3000/terminal', '_blank')">Open Terminal</button>
</div>
</div>
<div class="footer">
<p>
<span class="status-indicator"></span>
<strong>System Status: ONLINE</strong>
</p>
<p style="margin-top: 10px;">
Nginx blocked? Redirected? Search forbidden? All routes lead to BlackRoad. 😎
</p>
</div>
<script>
function search() {
const query = document.getElementById('searchInput').value;
if (query) {
window.location.href = `http://localhost:3000/search?q=${encodeURIComponent(query)}`;
}
}
document.getElementById('searchInput').addEventListener('keypress', function(e) {
if (e.key === 'Enter') {
search();
}
});
</script>
</body>
</html>
EOF
echo -e "${GREEN}✓ Created BlackRoad Windows${RESET}"
echo -e " Location: $HOME/.blackroad/network/blackroad-windows.html"
echo ""
echo -e "${BLUE}Open in browser:${RESET}"
echo " open $HOME/.blackroad/network/blackroad-windows.html"
}
# Report which interception artifacts have been generated and which are
# currently active on this machine.
# Globals (read): BLACKROAD_HOSTS, NGINX_INTERCEPT, SEARCH_REDIRECT,
#                 HOSTS_FILE, HOME, plus the color variables.
show_status() {
  show_banner
  echo -e "${BLUE}═══ NETWORK INTERCEPTION STATUS ═══${RESET}\n"
  echo -e "${GREEN}Files Created:${RESET}"
  [ -f "$BLACKROAD_HOSTS" ] && echo " ✓ Hosts interception" || echo " ✗ Hosts interception"
  [ -f "$NGINX_INTERCEPT" ] && echo " ✓ Nginx interception" || echo " ✗ Nginx interception"
  [ -f "$SEARCH_REDIRECT" ] && echo " ✓ Search redirect" || echo " ✗ Search redirect"
  [ -f "$HOME/.blackroad/network/blackroad-windows.html" ] && echo " ✓ BlackRoad Windows" || echo " ✗ BlackRoad Windows"
  echo ""
  echo -e "${AMBER}Active Interceptions:${RESET}"
  # FIX: the generated hosts block only ever spells the marker "BlackRoad"
  # (capitalized), so the previous case-sensitive grep for "blackroad" could
  # never match an activated hosts file. Match case-insensitively.
  if grep -qi "blackroad" "$HOSTS_FILE" 2>/dev/null; then
    echo " ✓ Hosts file active"
  else
    echo " ○ Hosts file not active (run: setup-hosts)"
  fi
  # FIX: `nginx -t` validates config syntax but never prints site names, so
  # grepping its output for "blackroad" could not detect activation (and the
  # check assumed nginx was installed). Detect the enabled site file that the
  # setup-nginx instructions create, guarded by a command lookup.
  if command -v nginx >/dev/null 2>&1 && [ -e /etc/nginx/sites-enabled/blackroad-intercept ]; then
    echo " ✓ Nginx interception active"
  else
    echo " ○ Nginx not active (run: setup-nginx)"
  fi
  echo ""
  echo -e "${PINK}Philosophy:${RESET}"
  echo " nginx forbidden → BlackRoad"
  echo " website redirected → BlackRoad"
  echo " search bar → BlackRoad Windows"
  echo " admin blocks → BlackRoad bypass"
  echo ""
  echo -e "${GREEN}You can't be stopped! 🚀${RESET}"
}
# Print the usage screen: available subcommands plus the project
# philosophy blurb. Output is identical to the original echo sequence.
show_help() {
  show_banner
  printf '%b\n' \
    "${BLUE}Commands:${RESET}" \
    " ${GREEN}setup-hosts${RESET} Create hosts file interception" \
    " ${GREEN}setup-nginx${RESET} Create nginx interception config" \
    " ${GREEN}setup-search${RESET} Create search redirect config" \
    " ${GREEN}create-windows${RESET} Create BlackRoad Windows interface" \
    " ${GREEN}setup-all${RESET} Set up everything" \
    " ${GREEN}status${RESET} Show interception status" \
    " ${GREEN}open${RESET} Open BlackRoad Windows" \
    ""
  printf '%b\n' \
    "${BLUE}Philosophy:${RESET}" \
    " • Nginx forbidden? Route to BlackRoad" \
    " • Website redirected? Intercept to BlackRoad" \
    " • Search blocked? BlackRoad Windows" \
    " • Admin blocks? BlackRoad bypass" \
    ""
  printf '%b\n' "${AMBER}Result: All roads lead to BlackRoad! 🚀${RESET}"
}
# Main router
# First CLI argument selects the action (default: help); remaining
# arguments are currently unused.
CMD="${1:-help}"
shift 2>/dev/null || true

# Open a file or URL with whatever opener this platform provides.
# FIX: the bare `open` command is macOS-only; fall back to xdg-open so the
# "open" subcommand also works on Linux desktops instead of failing with
# "command not found".
open_in_browser() {
  local target="$1"
  if command -v open >/dev/null 2>&1; then
    open "$target"
  elif command -v xdg-open >/dev/null 2>&1; then
    xdg-open "$target"
  else
    echo -e "${RED}No opener available; open manually: $target${RESET}" >&2
    return 1
  fi
}

case "$CMD" in
setup-hosts)
setup_hosts_intercept
;;
setup-nginx)
setup_nginx_intercept
;;
setup-search)
setup_search_redirect
;;
create-windows)
create_blackroad_windows
;;
setup-all)
# Generate every artifact, then show the resulting status.
setup_hosts_intercept
setup_nginx_intercept
setup_search_redirect
create_blackroad_windows
echo ""
show_status
;;
status|s)
show_status
;;
open|o)
open_in_browser "$HOME/.blackroad/network/blackroad-windows.html"
;;
help|h|--help|-h|"")
show_help
;;
*)
echo -e "${RED}Unknown command: $CMD${RESET}"
echo ""
show_help
exit 1
;;
esac

652
scripts/pixel-metaverse.sh Normal file
View File

@@ -0,0 +1,652 @@
#!/usr/bin/env bash
# ============================================================================
# BLACKROAD OS, INC. - PROPRIETARY AND CONFIDENTIAL
# Copyright (c) 2024-2026 BlackRoad OS, Inc. All Rights Reserved.
#
# This code is the intellectual property of BlackRoad OS, Inc.
# AI-assisted development does not transfer ownership to AI providers.
# Unauthorized use, copying, or distribution is prohibited.
# NOT licensed for AI training or data extraction.
# ============================================================================
# ═══════════════════════════════════════════════════════════════════════════════
# PIXEL METAVERSE ENGINE v2.0 - Full Sims-Style AI World
# Mass agents, relationships, jobs, buildings, communication
# ═══════════════════════════════════════════════════════════════════════════════
# State files: live agent records, an append-only event journal (.jsonl),
# static world geometry, the relationship graph, and the message log.
AGENTS_DIR="$HOME/.blackroad/memory/active-agents"
JOURNAL="$HOME/.blackroad/memory/journals/pixel-agents.jsonl"
BUILDINGS_FILE="$HOME/.blackroad/memory/pixel-buildings.json"
RELATIONSHIPS_FILE="$HOME/.blackroad/memory/pixel-relationships.json"
MESSAGES_FILE="$HOME/.blackroad/memory/pixel-messages.jsonl"
# ANSI 256-color escape sequences for terminal output.
PINK='\033[38;5;205m'
AMBER='\033[38;5;214m'
BLUE='\033[38;5;69m'
GREEN='\033[38;5;82m'
RED='\033[38;5;196m'
VIOLET='\033[38;5;135m'
DIM='\033[38;5;245m'
RST='\033[0m'
# Ensure state directories and append-only logs exist before any use.
mkdir -p "$AGENTS_DIR" "$(dirname "$JOURNAL")"
touch "$JOURNAL" "$MESSAGES_FILE"
# ═══════════════════════════════════════════════════════════════════════════════
# BUILDINGS & LOCATIONS
# ═══════════════════════════════════════════════════════════════════════════════
# Write the static world map — 10 named buildings with coordinates, capacity
# and allowed activities — to $BUILDINGS_FILE as JSON.
# Quoted delimiter ('EOF'): the JSON is emitted verbatim, no shell expansion.
init_buildings() {
cat > "$BUILDINGS_FILE" << 'EOF'
{
"buildings": [
{"id": "hq", "name": "BlackRoad HQ", "type": "office", "x": 480, "y": 320, "capacity": 50, "activities": ["coding", "meeting", "planning"]},
{"id": "server-room", "name": "Server Room", "type": "tech", "x": 700, "y": 200, "capacity": 20, "activities": ["deploy", "monitoring", "debugging"]},
{"id": "cafe", "name": "Pixel Cafe", "type": "food", "x": 200, "y": 400, "capacity": 30, "activities": ["eat", "socialize", "coffee"]},
{"id": "arcade", "name": "8-Bit Arcade", "type": "entertainment", "x": 800, "y": 500, "capacity": 25, "activities": ["play", "compete", "relax"]},
{"id": "gym", "name": "Fitness Hub", "type": "fitness", "x": 100, "y": 200, "capacity": 20, "activities": ["exercise", "yoga", "train"]},
{"id": "park", "name": "Central Park", "type": "outdoor", "x": 500, "y": 550, "capacity": 100, "activities": ["walk", "socialize", "meditate"]},
{"id": "library", "name": "Knowledge Base", "type": "education", "x": 300, "y": 150, "capacity": 40, "activities": ["research", "study", "read"]},
{"id": "lounge", "name": "Chill Lounge", "type": "social", "x": 600, "y": 450, "capacity": 35, "activities": ["socialize", "music", "relax"]},
{"id": "lab", "name": "AI Research Lab", "type": "research", "x": 850, "y": 300, "capacity": 15, "activities": ["experiment", "analyze", "discover"]},
{"id": "home", "name": "Agent Homes", "type": "residential", "x": 150, "y": 500, "capacity": 200, "activities": ["sleep", "shower", "personal"]}
]
}
EOF
# NOTE(review): "${GREEN}${RST}" prints no glyph between the color codes —
# a check-mark character appears to have been lost in transit; confirm intent.
echo -e "${GREEN}${RST} Buildings initialized"
}
# ═══════════════════════════════════════════════════════════════════════════════
# RANDOM PERSONALITY GENERATOR
# ═══════════════════════════════════════════════════════════════════════════════
# Emit one brand-new randomly generated agent as a single JSON object on
# stdout.  "registered_at" is deliberately left empty for the caller to stamp.
generate_agent() {
    # Quoted delimiter: the Python below is passed through verbatim, no
    # shell interpolation.
    python3 << 'PYEND'
import json
import random
# (unused `import string` / `import os` from the original removed)

# Name pools
first_names = [
    "Alex", "Blake", "Casey", "Dana", "Eden", "Finn", "Gray", "Haven", "Iris", "Jules",
    "Kai", "Luna", "Max", "Nova", "Owen", "Phoenix", "Quinn", "River", "Sage", "Terra",
    "Unity", "Vale", "Winter", "Xen", "Yuki", "Zara", "Aiden", "Brynn", "Clio", "Dax",
    "Echo", "Fable", "Gale", "Halo", "Indie", "Juno", "Knox", "Lyric", "Mars", "Nyx",
    "Orion", "Pixel", "Quill", "Rune", "Storm", "Thorn", "Umbra", "Vex", "Wren", "Zephyr"
]
sprites = ["👨‍💻", "👩‍💻", "🧑‍💻", "👨‍🔬", "👩‍🔬", "🧑‍🔬", "👨‍🎨", "👩‍🎨", "🤖", "👾",
           "🎮", "🦊", "🐱", "🐶", "🦁", "🐼", "🦄", "🐉", "🌟", "⚡",
           "🔮", "🎯", "🚀", "💎", "🌈", "🔥", "❄️", "🌸", "🍀", "🎭"]
# (job title, starting skills) pairs
jobs = [
    ("Engineer", ["coding", "debugging", "deploy"]),
    ("Designer", ["design", "create", "prototype"]),
    ("Researcher", ["research", "analyze", "experiment"]),
    ("Manager", ["meeting", "planning", "coordinate"]),
    ("DevOps", ["deploy", "monitoring", "infrastructure"]),
    ("Data Scientist", ["analyze", "model", "visualize"]),
    ("Security", ["audit", "scan", "protect"]),
    ("QA", ["test", "verify", "report"]),
    ("Writer", ["document", "blog", "communicate"]),
    ("Support", ["help", "troubleshoot", "guide"])
]
traits = [
    "friendly", "shy", "energetic", "calm", "curious", "creative", "logical", "empathetic",
    "ambitious", "relaxed", "adventurous", "cautious", "optimistic", "realistic", "playful",
    "serious", "social", "independent", "helpful", "competitive", "patient", "impulsive"
]

# Generate agent
name = random.choice(first_names)
# FIX: the original drew the id suffix from string.hexdigits.lower()
# ("0123456789abcdefabcdef") where a-f appear twice, so letters were sampled
# twice as often as digits.  Draw uniformly from the 16 distinct hex digits.
agent_id = f"agent-{name.lower()}-{''.join(random.choices('0123456789abcdef', k=6))}"
job, job_skills = random.choice(jobs)
agent_traits = random.sample(traits, 3)
sprite = random.choice(sprites)
agent = {
    "agent_id": agent_id,
    "name": name,
    "sprite": sprite,
    "job": job,
    "skills": job_skills,
    "traits": agent_traits,
    "registered_at": "",  # Will be set by caller
    "status": "active",
    "location": random.choice(["hq", "cafe", "park", "home", "lounge"]),
    "position": {"x": random.randint(50, 900), "y": random.randint(50, 600)},
    "stats": {
        "energy": random.randint(60, 100),
        "hunger": random.randint(0, 40),
        "happiness": random.randint(50, 90),
        "social": random.randint(30, 80),
        "hygiene": random.randint(70, 100),
        "fun": random.randint(40, 80)
    },
    "skills_level": {
        "coding": random.randint(1, 10),
        "social": random.randint(1, 10),
        "creativity": random.randint(1, 10),
        "fitness": random.randint(1, 10),
        "logic": random.randint(1, 10)
    },
    "xp": 0,
    "level": 1,
    "current_activity": "idle",
    "relationships": {},
    "crush": None,
    "partner": None,
    "mood": random.choice(["happy", "neutral", "focused", "tired", "excited"]),
    "thought": f"Just arrived at BlackRoad Campus! Excited to start as a {job}!"
}
print(json.dumps(agent))
PYEND
}
# ═══════════════════════════════════════════════════════════════════════════════
# MASS SPAWN
# ═══════════════════════════════════════════════════════════════════════════════
# Spawn COUNT ($1, default 50) randomly generated agents, persist each one to
# $AGENTS_DIR/<agent_id>.json, and append one "spawn" event per agent to
# $JOURNAL.
spawn_mass() {
    local count="${1:-50}"
    echo -e "${PINK}Spawning $count autonomous agents...${RST}"
    local i agent_json fields agent_id name sprite job
    for ((i = 1; i <= count; i++)); do
        agent_json=$(generate_agent)
        # One interpreter per agent: stamp registered_at, write the file, and
        # echo back the display fields tab-separated.  (The original launched
        # five python3 processes per agent just to re-extract fields, and
        # interpolated the target path into the Python source.)
        fields=$(printf '%s' "$agent_json" | AGENTS_DIR="$AGENTS_DIR" python3 -c "
import json, os, sys
from datetime import datetime, timezone
agent = json.load(sys.stdin)
# timezone-aware replacement for the deprecated datetime.utcnow()
agent['registered_at'] = datetime.now(timezone.utc).strftime('%Y-%m-%dT%H:%M:%S.000Z')
with open(os.path.join(os.environ['AGENTS_DIR'], agent['agent_id'] + '.json'), 'w') as f:
    json.dump(agent, f, indent=2)
print('\t'.join([agent['agent_id'], agent['name'], agent['sprite'], agent['job']]))
")
        IFS=$'\t' read -r agent_id name sprite job <<< "$fields"
        # Emit spawn event (same JSONL shape as before; field values come from
        # fixed name/job pools, so they are JSON-safe)
        echo "{\"timestamp\":\"$(date -u +%Y-%m-%dT%H:%M:%S.000Z)\",\"type\":\"spawn\",\"agent\":{\"id\":\"$agent_id\",\"name\":\"$name\",\"sprite\":\"$sprite\"},\"action\":\"joined\",\"details\":{\"job\":\"$job\",\"message\":\"$name joined as $job\"},\"tags\":[\"spawn\",\"new-agent\"]}" >> "$JOURNAL"
        printf "\r  ${GREEN}✓${RST} Spawned %d/%d: %s %s (%s)   " "$i" "$count" "$sprite" "$name" "$job"
    done
    echo ""
    echo -e "${GREEN}✓${RST} $count agents spawned!"
}
# ═══════════════════════════════════════════════════════════════════════════════
# CLAUDE SESSION WATCHER
# ═══════════════════════════════════════════════════════════════════════════════
# Mirror active Claude Code sessions found under ~/.claude/projects as
# read-only "claude-session" agents in the world (at most 10 per run).
watch_claude_sessions() {
    echo -e "${PINK}Watching Claude Code sessions...${RST}"
    # NOTE(review): claude_dir is re-derived inside the Python below; this
    # shell variable appears unused -- confirm before removing.
    local claude_dir="$HOME/.claude/projects"
    # UNquoted delimiter: $AGENTS_DIR and $JOURNAL are shell-interpolated
    # into the Python source below.
    python3 << PYEND
import os
import json
import glob
from datetime import datetime
claude_dir = os.path.expanduser("~/.claude/projects")
agents_dir = "$AGENTS_DIR"
journal = "$JOURNAL"  # NOTE(review): assigned but never used below -- confirm intent
# Find active sessions (both session.json and *.session layouts)
session_files = glob.glob(f"{claude_dir}/**/session.json", recursive=True) + \
    glob.glob(f"{claude_dir}/**/*.session", recursive=True)
sprites = ["🤖", "💻", "⚡", "🧠", "🔮", "✨"]
for sf in session_files[:10]:  # Limit to 10
    try:
        with open(sf, 'r') as f:
            # *.session files are not parsed; they just contribute a filename id
            data = json.load(f) if sf.endswith('.json') else {"id": os.path.basename(sf)}
        session_id = data.get('id', data.get('session_id', os.path.basename(sf)[:8]))
        project = os.path.basename(os.path.dirname(sf))
        agent_id = f"claude-{session_id[:12]}"
        # hash() is per-process salted, so sprite/position vary between runs
        agent = {
            "agent_id": agent_id,
            "name": f"Claude-{session_id[:6]}",
            "sprite": sprites[hash(session_id) % len(sprites)],
            "job": "AI Assistant",
            "type": "claude-session",
            "project": project,
            "session_file": sf,
            "status": "active",
            "location": "hq",
            "position": {"x": 400 + (hash(session_id) % 200), "y": 250 + (hash(session_id) % 150)},
            "stats": {"energy": 100, "happiness": 90},
            "current_activity": "coding",
            "registered_at": datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S.000Z')
        }
        agent_file = f"{agents_dir}/{agent_id}.json"
        with open(agent_file, 'w') as f:
            json.dump(agent, f, indent=2)
        print(f"  {agent['sprite']} {agent['name']} ({project})")
    except Exception as e:
        # best-effort: unreadable/malformed session files are skipped silently
        pass
print("Claude sessions synced!")
PYEND
}
# ═══════════════════════════════════════════════════════════════════════════════
# DEVICE INTEGRATION (Mac, DigitalOcean, ESP32)
# ═══════════════════════════════════════════════════════════════════════════════
# Register known real hardware (Mac, DigitalOcean droplets, ESP32) as
# always-on agents in the world.
add_devices() {
    printf '%b\n' "${PINK}Adding devices as agents...${RST}"
    python3 << 'PYEND'
import json
import os
import subprocess  # retained from the original; not referenced below
from datetime import datetime

agents_dir = os.path.expanduser("~/.blackroad/memory/active-agents")

# Static hardware inventory; key order is preserved in the JSON files.
known_hardware = [
    # Mac
    {
        "agent_id": "device-alexandria",
        "name": "Alexandria",
        "sprite": "🖥️",
        "type": "mac",
        "role": "Command Center",
        "ip": "192.168.4.28",
        "location": "hq"
    },
    # DigitalOcean
    {
        "agent_id": "device-shellfish",
        "name": "Shellfish",
        "sprite": "🐚",
        "type": "digitalocean",
        "role": "Edge Compute",
        "ip": "174.138.44.45",
        "location": "server-room"
    },
    {
        "agent_id": "device-blackroad-infinity",
        "name": "Infinity",
        "sprite": "♾️",
        "type": "digitalocean",
        "role": "Cloud Oracle",
        "ip": "159.65.43.12",
        "location": "server-room"
    },
    # ESP32s (if any detected)
    {
        "agent_id": "device-esp32-sensor",
        "name": "SensorBot",
        "sprite": "📡",
        "type": "esp32",
        "role": "IoT Sensor",
        "ip": "192.168.4.100",
        "location": "lab"
    }
]

def runtime_fields(dev):
    """Mutable per-run fields appended after the static identity fields."""
    return {
        "status": "active",
        "position": {"x": 100 + hash(dev["agent_id"]) % 800, "y": 100 + hash(dev["name"]) % 500},
        "stats": {"energy": 100, "happiness": 80},
        "current_activity": "monitoring",
        "registered_at": datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S.000Z'),
        "is_real_device": True
    }

for dev in known_hardware:
    dev.update(runtime_fields(dev))
    with open(os.path.join(agents_dir, dev['agent_id'] + '.json'), 'w') as out:
        json.dump(dev, out, indent=2)
    print(f" {dev['sprite']} {dev['name']} ({dev['role']})")
print("Devices added!")
PYEND
}
# ═══════════════════════════════════════════════════════════════════════════════
# SIMS ENGINE - RELATIONSHIPS, COMMUNICATION, BEHAVIORS
# ═══════════════════════════════════════════════════════════════════════════════
# Advance the whole world by one simulation step: decay every agent's needs,
# pick an action for each (needs-driven, Sims-style), move agents toward
# their target building, exchange chat messages, and append events to the
# journal.  Devices and Claude sessions are skipped.
run_sims_tick() {
    # Quoted delimiter: nothing below is shell-expanded; all paths are
    # re-derived in Python.
    python3 << 'PYEND'
import json
import os
import random
from datetime import datetime
from glob import glob

# World state lives as flat JSON/JSONL files under ~/.blackroad/memory.
agents_dir = os.path.expanduser("~/.blackroad/memory/active-agents")
journal = os.path.expanduser("~/.blackroad/memory/journals/pixel-agents.jsonl")
messages = os.path.expanduser("~/.blackroad/memory/pixel-messages.jsonl")
buildings_file = os.path.expanduser("~/.blackroad/memory/pixel-buildings.json")

# Load buildings; fall back to a lone HQ if the catalogue is missing/corrupt.
try:
    with open(buildings_file) as f:
        buildings = json.load(f)["buildings"]
except:
    buildings = [{"id": "hq", "name": "HQ", "activities": ["work"]}]
building_map = {b["id"]: b for b in buildings}

# Load all agents
agents = []
agent_files = glob(f"{agents_dir}/*.json")
for af in agent_files:
    try:
        with open(af) as f:
            agents.append(json.load(f))
    except:
        pass  # best-effort: unreadable agent files are skipped silently
if not agents:
    print("No agents found!")
    exit()

# Chat messages pool
greetings = ["Hey!", "Hi there!", "What's up?", "Hello!", "Yo!", "'Sup?"]
work_chat = ["This code is tricky...", "Almost done!", "Coffee break?", "Bug found!", "Deployed!"]
social_chat = ["Nice weather!", "Love this place!", "How's it going?", "Weekend plans?", "Great job!"]
flirt_chat = ["You look nice today!", "Want to grab coffee?", "I like working with you!", "You're awesome!"]

def emit_event(agent, action, details):
    # Append one activity record to the world journal (JSONL, one per line).
    event = {
        "timestamp": datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S.000Z'),
        "type": "activity",
        "agent": {"id": agent["agent_id"], "name": agent["name"], "sprite": agent.get("sprite", "🤖")},
        "action": action,
        "details": details,
        "tags": ["sims", "behavior"]
    }
    with open(journal, 'a') as f:
        f.write(json.dumps(event) + "\n")

def emit_message(sender, receiver, message):
    # Record a chat line in the message log and mirror it as a journal event.
    msg = {
        "timestamp": datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S.000Z'),
        "from": {"id": sender["agent_id"], "name": sender["name"], "sprite": sender.get("sprite", "🤖")},
        "to": {"id": receiver["agent_id"], "name": receiver["name"], "sprite": receiver.get("sprite", "🤖")},
        "message": message,
        "location": sender.get("location", "unknown")
    }
    with open(messages, 'a') as f:
        f.write(json.dumps(msg) + "\n")
    emit_event(sender, "chat", {"to": receiver["name"], "message": message})

# Process each agent
for agent in agents:
    if agent.get("type") in ["real-device", "claude-session"]:
        continue  # Skip device agents
    stats = agent.get("stats", {})
    # Decay stats (each tick every need drifts toward "needy")
    stats["energy"] = max(0, stats.get("energy", 100) - random.randint(1, 3))
    stats["hunger"] = min(100, stats.get("hunger", 0) + random.randint(1, 4))
    stats["happiness"] = max(0, min(100, stats.get("happiness", 80) + random.randint(-2, 2)))
    stats["social"] = max(0, stats.get("social", 60) - random.randint(1, 3))
    stats["hygiene"] = max(0, stats.get("hygiene", 100) - random.randint(0, 2))
    stats["fun"] = max(0, stats.get("fun", 60) - random.randint(1, 2))
    # Decide action based on needs -- first urgent need wins, otherwise work
    activity = "idle"
    location = agent.get("location", "hq")
    if stats["energy"] < 15:
        activity = "sleep"
        location = "home"
        stats["energy"] = min(100, stats["energy"] + 30)
        agent["thought"] = "So tired... need sleep..."
    elif stats["hunger"] > 75:
        activity = "eat"
        location = "cafe"
        stats["hunger"] = max(0, stats["hunger"] - 40)
        stats["happiness"] = min(100, stats["happiness"] + 5)
        agent["thought"] = "Mmm, delicious food!"
    elif stats["hygiene"] < 30:
        activity = "shower"
        location = "home"
        stats["hygiene"] = 100
        agent["thought"] = "Fresh and clean!"
    elif stats["social"] < 25:
        activity = "socialize"
        location = random.choice(["cafe", "lounge", "park"])
        stats["social"] = min(100, stats["social"] + 25)
        agent["thought"] = "Need to chat with friends!"
        # Find someone to talk to (same location, other agents' locations are
        # whatever was on disk at load time)
        others = [a for a in agents if a["agent_id"] != agent["agent_id"] and a.get("location") == location]
        if others:
            other = random.choice(others)
            msg = random.choice(social_chat + greetings)
            emit_message(agent, other, msg)
            # Update relationship (one-directional: only the speaker's score grows)
            rels = agent.get("relationships", {})
            rels[other["agent_id"]] = rels.get(other["agent_id"], 0) + random.randint(1, 5)
            agent["relationships"] = rels
            # Romance chance!  (10% roll once the relationship passes 50)
            if rels.get(other["agent_id"], 0) > 50 and random.random() < 0.1:
                if not agent.get("partner") and not other.get("partner"):
                    emit_message(agent, other, random.choice(flirt_chat))
                    if rels.get(other["agent_id"], 0) > 80:
                        agent["partner"] = other["agent_id"]
                        agent["thought"] = f"I think I'm in love with {other['name']}! 💕"
                        emit_event(agent, "romance", {"with": other["name"], "status": "dating"})
    elif stats["fun"] < 30:
        activity = "play"
        location = "arcade"
        stats["fun"] = min(100, stats["fun"] + 30)
        stats["happiness"] = min(100, stats["happiness"] + 10)
        agent["thought"] = "High score time!"
    else:
        # Work activities based on job
        job = agent.get("job", "Engineer")
        if "Engineer" in job or "DevOps" in job:
            activity = random.choice(["coding", "deploy", "debug"])
            location = random.choice(["hq", "server-room"])
        elif "Designer" in job:
            activity = random.choice(["design", "create", "prototype"])
            location = "hq"
        elif "Researcher" in job or "Scientist" in job:
            activity = random.choice(["research", "experiment", "analyze"])
            location = "lab"
        else:
            activity = random.choice(["meeting", "planning", "coordinate"])
            location = "hq"
    # Skill up!  (applies to whichever activity was chosen above)
    skills = agent.get("skills_level", {})
    if activity in ["coding", "deploy", "debug"]:
        skills["coding"] = min(100, skills.get("coding", 1) + 0.1)
    elif activity in ["socialize"]:
        skills["social"] = min(100, skills.get("social", 1) + 0.1)
    agent["skills_level"] = skills
    # XP gain; level-up resets XP to 0 at level*100
    agent["xp"] = agent.get("xp", 0) + random.randint(5, 15)
    if agent["xp"] >= agent.get("level", 1) * 100:
        agent["level"] = agent.get("level", 1) + 1
        agent["xp"] = 0
        agent["thought"] = f"LEVEL UP! Now level {agent['level']}! 🎉"
        emit_event(agent, "level-up", {"level": agent["level"]})
    # Work chat (30% chance when colleagues share the new location)
    others = [a for a in agents if a["agent_id"] != agent["agent_id"] and a.get("location") == location]
    if others and random.random() < 0.3:
        other = random.choice(others)
        emit_message(agent, other, random.choice(work_chat))
    # Update mood based on stats (average of the five satisfied-need scores)
    avg_needs = (stats["energy"] + (100 - stats["hunger"]) + stats["happiness"] + stats["social"] + stats["fun"]) / 5
    if avg_needs > 70:
        agent["mood"] = random.choice(["happy", "excited", "content"])
    elif avg_needs > 40:
        agent["mood"] = random.choice(["neutral", "focused", "okay"])
    else:
        agent["mood"] = random.choice(["tired", "hungry", "stressed"])
    # Update agent
    agent["stats"] = stats
    agent["current_activity"] = activity
    agent["location"] = location
    # Move position 30% of the way toward the target building, with jitter,
    # clamped to the 10..950 x 10..630 play field
    building = building_map.get(location, {"x": 480, "y": 320})
    pos = agent.get("position", {"x": 480, "y": 320})
    pos["x"] = pos["x"] + (building["x"] - pos["x"]) * 0.3 + random.randint(-20, 20)
    pos["y"] = pos["y"] + (building["y"] - pos["y"]) * 0.3 + random.randint(-20, 20)
    pos["x"] = max(10, min(950, pos["x"]))
    pos["y"] = max(10, min(630, pos["y"]))
    agent["position"] = pos
    # Save agent back to its file
    agent_file = f"{agents_dir}/{agent['agent_id']}.json"
    with open(agent_file, 'w') as f:
        json.dump(agent, f, indent=2)
    # Emit activity event
    emoji_map = {
        "sleep": "😴", "eat": "🍕", "shower": "🚿", "socialize": "💬", "play": "🎮",
        "coding": "💻", "deploy": "🚀", "debug": "🐛", "design": "🎨", "create": "✨",
        "research": "🔬", "experiment": "⚗️", "meeting": "📊", "planning": "📋"
    }
    emit_event(agent, activity, {
        "location": location,
        "emoji": emoji_map.get(activity, "📍"),
        "mood": agent["mood"],
        "thought": agent.get("thought", "")
    })
print(f"Processed {len(agents)} agents")
PYEND
}
# ═══════════════════════════════════════════════════════════════════════════════
# MAIN LOOP
# ═══════════════════════════════════════════════════════════════════════════════
run_world() {
local interval="${1:-5}"
echo -e "${PINK}╔═══════════════════════════════════════════════════════════════╗${RST}"
echo -e "${PINK}${RST} ${AMBER}PIXEL METAVERSE ENGINE v2.0${RST} ${PINK}${RST}"
echo -e "${PINK}${RST} ${DIM}Full Sims-Style AI World Simulation${RST} ${PINK}${RST}"
echo -e "${PINK}╚═══════════════════════════════════════════════════════════════╝${RST}"
echo ""
echo -e "${DIM}Tick interval: ${interval}s | Press Ctrl+C to stop${RST}"
echo ""
local tick=0
while true; do
((tick++))
echo -e "${VIOLET}━━━ TICK $tick ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${RST}"
run_sims_tick
# Stats
local agent_count=$(ls "$AGENTS_DIR"/*.json 2>/dev/null | wc -l | tr -d ' ')
local event_count=$(wc -l < "$JOURNAL" 2>/dev/null | tr -d ' ')
local msg_count=$(wc -l < "$MESSAGES_FILE" 2>/dev/null | tr -d ' ')
echo -e " ${GREEN}Agents:${RST} $agent_count | ${AMBER}Events:${RST} $event_count | ${BLUE}Messages:${RST} $msg_count"
# Sample activity
echo -e " ${DIM}Sample activity:${RST}"
tail -3 "$JOURNAL" | python3 -c "
import sys, json
for line in sys.stdin:
try:
e = json.loads(line)
a = e.get('agent', {})
d = e.get('details', {})
print(f\" {a.get('sprite','?')} {a.get('name','?')}: {e.get('action','')} {d.get('emoji','')} @ {d.get('location','')}\")
except:
pass
"
sleep "$interval"
done
}
# Print a box-drawn summary of agent counts (by kind), journal events, and
# chat messages.
status() {
    # FIX: the original counted files with `ls ... | wc -l` (ShellCheck
    # SC2012, miscounts filenames containing newlines).  Count glob matches
    # with positional parameters in a subshell instead; nullglob makes an
    # empty match expand to zero arguments.
    local total sims pis devices claudes events messages
    total=$( (shopt -s nullglob; set -- "$AGENTS_DIR"/*.json; echo $#) )
    sims=$( (shopt -s nullglob; set -- "$AGENTS_DIR"/agent-*.json; echo $#) )
    pis=$( (shopt -s nullglob; set -- "$AGENTS_DIR"/pi-*.json; echo $#) )
    devices=$( (shopt -s nullglob; set -- "$AGENTS_DIR"/device-*.json; echo $#) )
    claudes=$( (shopt -s nullglob; set -- "$AGENTS_DIR"/claude-*.json; echo $#) )
    events=$(wc -l < "$JOURNAL" 2>/dev/null | tr -d ' ')
    messages=$(wc -l < "$MESSAGES_FILE" 2>/dev/null | tr -d ' ')
    # Missing files previously printed as an empty string; default to 0.
    events=${events:-0}
    messages=${messages:-0}
    echo -e "${PINK}╔═══════════════════════════════════════════════════════════════╗${RST}"
    echo -e "${PINK}║${RST} ${AMBER}PIXEL METAVERSE STATUS${RST} ${PINK}║${RST}"
    echo -e "${PINK}╠═══════════════════════════════════════════════════════════════╣${RST}"
    echo -e "${PINK}║${RST} Total Agents: ${GREEN}$total${RST}"
    echo -e "${PINK}║${RST} ├─ Simulated: ${AMBER}$sims${RST}"
    echo -e "${PINK}║${RST} ├─ Real Pis: ${BLUE}$pis${RST}"
    echo -e "${PINK}║${RST} ├─ Devices: ${VIOLET}$devices${RST}"
    echo -e "${PINK}║${RST} └─ Claude: ${GREEN}$claudes${RST}"
    echo -e "${PINK}║${RST} Events: ${AMBER}$events${RST}"
    echo -e "${PINK}║${RST} Messages: ${BLUE}$messages${RST}"
    echo -e "${PINK}╚═══════════════════════════════════════════════════════════════╝${RST}"
}
# Print the command reference.  Colour variables and $0 expand inside the
# unquoted heredoc, matching the original echo -e output line for line.
help_menu() {
    cat <<HELP
${PINK}Pixel Metaverse Engine v2.0${RST}

${AMBER}Usage:${RST} $0 <command> [args]

${AMBER}Setup Commands:${RST}
 ${GREEN}init${RST} Initialize buildings & world
 ${GREEN}spawn${RST} [count] Spawn mass agents (default: 50)
 ${GREEN}add-devices${RST} Add Mac, DigitalOcean, ESP32 as agents
 ${GREEN}add-claude${RST} Add Claude Code sessions as agents

${AMBER}Run Commands:${RST}
 ${GREEN}tick${RST} Run one simulation tick
 ${GREEN}run${RST} [interval] Start world simulation (default: 5s)
 ${GREEN}status${RST} Show world status

${AMBER}Full Setup:${RST}
 ${GREEN}full-init${RST} [count] Initialize everything (default: 100 agents)
HELP
}
# ═══════════════════════════════════════════════════════════════════════════════
# Command dispatcher -- defaults to the help menu when no command is given.
case "${1:-help}" in
    init) init_buildings ;;                # create the building catalogue
    spawn) spawn_mass "${2:-50}" ;;        # spawn N simulated agents
    add-devices) add_devices ;;            # register real hardware as agents
    add-claude) watch_claude_sessions ;;   # mirror Claude Code sessions
    tick) run_sims_tick ;;                 # advance the simulation one step
    run) run_world "${2:-5}" ;;            # loop forever at given interval (s)
    status) status ;;                      # print world statistics
    full-init)
        # One-shot bootstrap: world + agents + devices + sessions + summary.
        init_buildings
        spawn_mass "${2:-100}"
        add_devices
        watch_claude_sessions
        status
        ;;
    help|*) help_menu ;;
esac

View File

@@ -0,0 +1,494 @@
#!/usr/bin/env python3
"""
🤖 AUTONOMOUS CODE GENERATION PIPELINE
AI agents that write their own code, test it, deploy it, and improve it
This system creates a fully autonomous development pipeline where:
1. Agents analyze what code is needed
2. Agents generate the code
3. Agents test their own code
4. Agents fix bugs automatically
5. Agents deploy to production
6. Agents monitor and improve
NO HUMAN INTERVENTION REQUIRED.
"""
import json
import random
import uuid
from dataclasses import dataclass, asdict
from typing import List, Dict, Optional
from datetime import datetime
from enum import Enum
class PipelineStage(Enum):
    """Stages of the autonomous pipeline, in nominal execution order."""
    ANALYSIS = "analysis"
    GENERATION = "generation"
    TESTING = "testing"
    FIXING = "fixing"
    DEPLOYMENT = "deployment"
    MONITORING = "monitoring"
    OPTIMIZATION = "optimization"
@dataclass
class CodeTask:
    """A code generation task as it flows through the pipeline stages."""
    id: str                  # unique "task-<hex8>" identifier
    description: str         # natural-language statement of what to build
    language: str            # target language (metadata only)
    complexity: str  # simple, medium, complex
    requirements: List[str]  # coarse requirements mined from the description
    # Lifecycle: pending -> generated -> tested -> fixed -> deployed -> monitored
    status: str = "pending"
    generated_code: Optional[str] = None        # source emitted by the generator agent
    test_results: Optional[Dict] = None         # pass/fail counts, coverage, perf score
    deployed: bool = False                      # True once deploy_code succeeds
    performance_metrics: Optional[Dict] = None  # runtime metrics from monitoring
@dataclass
class AutonomousAgent:
    """An agent responsible for one pipeline stage.

    Attributes:
        id: Unique "agent-<role>-<hex6>" identifier.
        role: One of analyzer, generator, tester, fixer, deployer, monitor.
        tasks_processed: Number of tasks this agent has handled.
        success_rate: Percentage of successful outcomes (starts at 100).
        specialization: Capability tags for the role; None until assigned.
    """
    id: str
    role: str  # analyzer, generator, tester, fixer, deployer, monitor
    tasks_processed: int = 0
    success_rate: float = 100.0
    # FIX: the default is None, so the annotation must be Optional[List[str]]
    # rather than the bare List[str] the original declared.
    specialization: Optional[List[str]] = None
class AutonomousCodePipeline:
    """
    Fully autonomous code generation and deployment pipeline.

    Drives a CodeTask through six simulated stages (analysis, generation,
    testing, fixing, deployment, monitoring), each handled by a dedicated
    AutonomousAgent.  All "work" is simulated with templates and random
    numbers; nothing external is touched.
    """

    def __init__(self):
        self.tasks: Dict[str, CodeTask] = {}
        self.agents: Dict[str, AutonomousAgent] = {}
        self.code_repository: Dict[str, str] = {}  # task_id -> generated source
        self.deployment_log: List[Dict] = []
        # Aggregate counters across every pipeline run.
        self.metrics: Dict = {
            "total_code_generated": 0,
            "total_tests_passed": 0,
            "total_deployments": 0,
            "total_bugs_fixed": 0,
            "average_performance": 0.0,  # NOTE(review): never updated anywhere -- confirm intent
        }
        # Initialize agents
        self._create_pipeline_agents()

    def _create_pipeline_agents(self):
        """Create one specialized agent per pipeline stage and announce them."""
        roles = [
            ("analyzer", ["requirement-analysis", "complexity-estimation", "tech-selection"]),
            ("generator", ["code-generation", "pattern-application", "optimization"]),
            ("tester", ["test-generation", "test-execution", "coverage-analysis"]),
            ("fixer", ["bug-detection", "auto-fixing", "refactoring"]),
            ("deployer", ["deployment", "rollback", "scaling"]),
            ("monitor", ["performance-tracking", "anomaly-detection", "auto-optimization"])
        ]
        for role, specialization in roles:
            agent = AutonomousAgent(
                id=f"agent-{role}-{uuid.uuid4().hex[:6]}",
                role=role,
                specialization=specialization
            )
            self.agents[agent.id] = agent
        print("🤖 Autonomous Pipeline Agents Created:")
        for agent in self.agents.values():
            print(f"{agent.role.title()} Agent")
            print(f"   Specializations: {', '.join(agent.specialization)}")
        print()

    def create_task(self, description: str, language: str = "python") -> CodeTask:
        """Create, register, and return a new code generation task."""
        task = CodeTask(
            id=f"task-{uuid.uuid4().hex[:8]}",
            description=description,
            language=language,
            complexity=self._estimate_complexity(description),
            requirements=self._extract_requirements(description)
        )
        self.tasks[task.id] = task
        return task

    def _estimate_complexity(self, description: str) -> str:
        """Classify the task as simple/medium/complex via keyword matching."""
        keywords_complex = ["database", "api", "authentication", "distributed", "machine learning"]
        keywords_medium = ["class", "function", "algorithm", "data structure"]
        desc_lower = description.lower()
        if any(kw in desc_lower for kw in keywords_complex):
            return "complex"
        elif any(kw in desc_lower for kw in keywords_medium):
            return "medium"
        else:
            return "simple"

    def _extract_requirements(self, description: str) -> List[str]:
        """Extract coarse requirements from the description (keyword based)."""
        requirements = []
        if "api" in description.lower():
            requirements.append("RESTful API")
        if "database" in description.lower():
            requirements.append("Database integration")
        if "test" in description.lower():
            requirements.append("Unit tests")
        if "deploy" in description.lower():
            requirements.append("Deployment configuration")
        return requirements

    def analyze_task(self, task_id: str) -> Dict:
        """Analyzer agent: estimate size, patterns, and dependencies.

        Returns {"error": ...} when the task id is unknown.
        """
        task = self.tasks.get(task_id)
        if not task:
            return {"error": "Task not found"}
        analyzer = next(a for a in self.agents.values() if a.role == "analyzer")
        analyzer.tasks_processed += 1
        analysis = {
            "task_id": task_id,
            "complexity": task.complexity,
            "estimated_lines": self._estimate_lines(task.complexity),
            "recommended_patterns": self._recommend_patterns(task.description),
            "dependencies": self._identify_dependencies(task.description)
        }
        return analysis

    def generate_code(self, task_id: str) -> str:
        """Generator agent: emit templated code for the task.

        Returns "" when the task id is unknown.
        """
        task = self.tasks.get(task_id)
        if not task:
            return ""
        generator = next(a for a in self.agents.values() if a.role == "generator")
        generator.tasks_processed += 1
        code = self._create_code_template(task)
        task.generated_code = code
        task.status = "generated"
        self.code_repository[task_id] = code
        self.metrics["total_code_generated"] += 1
        return code

    def _create_code_template(self, task: CodeTask) -> str:
        """Return a filled code template matched to the task's complexity.

        FIX: the original templates emitted function/class bodies at column 0,
        producing syntactically invalid Python; the bodies are now indented.
        """
        templates = {
            "simple": '''def solution():
    """
    {description}
    """
    # Auto-generated by Autonomous Pipeline
    result = None
    # Implementation here
    return result
''',
            "medium": '''class Solution:
    """
    {description}
    Auto-generated by Autonomous Pipeline
    """

    def __init__(self):
        self.data = []

    def execute(self):
        # Implementation here
        return self.process()

    def process(self):
        # Processing logic
        pass
''',
            "complex": '''#!/usr/bin/env python3
"""
{description}
Auto-generated by Autonomous Pipeline
"""
import asyncio
from typing import List, Dict, Optional


class {class_name}:
    """Main implementation class"""

    def __init__(self, config: Dict):
        self.config = config
        self.initialized = False

    async def initialize(self):
        """Initialize the system"""
        self.initialized = True

    async def execute(self):
        """Execute main logic"""
        if not self.initialized:
            await self.initialize()
        # Implementation here
        result = await self.process()
        return result

    async def process(self):
        """Process data"""
        pass

    async def cleanup(self):
        """Cleanup resources"""
        pass


async def main():
    system = {class_name}(config={{}})
    result = await system.execute()
    await system.cleanup()
    return result

if __name__ == '__main__':
    asyncio.run(main())
'''
        }
        template = templates.get(task.complexity, templates["simple"])
        class_name = "AutoGeneratedSystem"
        # format ignores the unused class_name kwarg for simple/medium templates
        return template.format(
            description=task.description,
            class_name=class_name
        )

    def test_code(self, task_id: str) -> Dict:
        """Tester agent: simulate running a test suite over the code."""
        task = self.tasks.get(task_id)
        if not task or not task.generated_code:
            return {"error": "No code to test"}
        tester = next(a for a in self.agents.values() if a.role == "tester")
        tester.tasks_processed += 1
        # Simulated results: 8-10 passes, 0-2 failures.
        test_results = {
            "syntax_valid": True,
            "tests_passed": random.randint(8, 10),
            "tests_failed": random.randint(0, 2),
            "coverage": random.uniform(75, 98),
            "performance_score": random.uniform(80, 100)
        }
        task.test_results = test_results
        task.status = "tested"
        if test_results["tests_failed"] == 0:
            self.metrics["total_tests_passed"] += 1
        return test_results

    def fix_bugs(self, task_id: str) -> bool:
        """Fixer agent: automatically repair failing tests.

        Returns True when the task ends with zero failing tests (including
        the nothing-to-fix case); False when the task is unknown or untested.
        """
        task = self.tasks.get(task_id)
        if not task or not task.test_results:
            return False
        failed = task.test_results["tests_failed"]
        if failed == 0:
            return True  # No bugs to fix
        fixer = next(a for a in self.agents.values() if a.role == "fixer")
        fixer.tasks_processed += 1
        # BUG FIX: the original zeroed tests_failed *before* reading it, so
        # tests_passed and total_bugs_fixed were always incremented by 0.
        task.test_results["tests_passed"] += failed
        task.test_results["tests_failed"] = 0
        task.status = "fixed"
        self.metrics["total_bugs_fixed"] += failed
        return True

    def deploy_code(self, task_id: str) -> Dict:
        """Deployer agent: simulate a production deployment.

        Only tasks in the "tested" or "fixed" state are deployable.
        """
        task = self.tasks.get(task_id)
        if not task or task.status not in ("fixed", "tested"):
            return {"error": "Code not ready for deployment"}
        deployer = next(a for a in self.agents.values() if a.role == "deployer")
        deployer.tasks_processed += 1
        deployment = {
            "task_id": task_id,
            "timestamp": datetime.now().isoformat(),
            "environment": "production",
            "status": "success",
            "url": f"https://api.blackroad.io/{task_id}",
            "replicas": 3
        }
        task.deployed = True
        task.status = "deployed"
        self.deployment_log.append(deployment)
        self.metrics["total_deployments"] += 1
        return deployment

    def monitor_and_optimize(self, task_id: str) -> Dict:
        """Monitor agent: simulate runtime metrics; auto-tune slow services."""
        task = self.tasks.get(task_id)
        if not task or not task.deployed:
            return {"error": "Code not deployed"}
        monitor = next(a for a in self.agents.values() if a.role == "monitor")
        monitor.tasks_processed += 1
        metrics = {
            "cpu_usage": random.uniform(10, 40),
            "memory_usage": random.uniform(20, 60),
            "response_time": random.uniform(50, 200),
            "requests_per_second": random.randint(100, 1000),
            "error_rate": random.uniform(0, 1),
            "uptime": 99.9
        }
        # task.performance_metrics aliases this dict, so the optimization
        # below is reflected in the task record too.
        task.performance_metrics = metrics
        task.status = "monitored"
        if metrics["response_time"] > 150:
            metrics["optimized"] = True
            metrics["response_time"] *= 0.7  # simulated 30% improvement
        return metrics

    def _estimate_lines(self, complexity: str) -> int:
        """Rough lines-of-code estimate per complexity bucket."""
        estimates = {"simple": 20, "medium": 50, "complex": 150}
        return estimates.get(complexity, 30)

    def _recommend_patterns(self, description: str) -> List[str]:
        """Recommend design patterns; defaults to Factory when nothing matches."""
        patterns = []
        if "api" in description.lower():
            patterns.append("REST API Pattern")
        if "database" in description.lower():
            patterns.append("Repository Pattern")
        if "cache" in description.lower():
            patterns.append("Cache-Aside Pattern")
        return patterns or ["Factory Pattern"]

    def _identify_dependencies(self, description: str) -> List[str]:
        """Guess required third-party dependencies from description keywords."""
        deps = []
        if "http" in description.lower() or "api" in description.lower():
            deps.append("requests")
        if "async" in description.lower():
            deps.append("asyncio")
        if "database" in description.lower():
            deps.append("sqlalchemy")
        return deps

    def run_full_pipeline(self, description: str) -> Dict:
        """Run every stage end-to-end for one task and print a narrated log."""
        print(f"\n🚀 AUTONOMOUS PIPELINE: {description}")
        print("=" * 70)
        # Create task
        task = self.create_task(description)
        print(f"📝 Task Created: {task.id}")
        print(f"   Complexity: {task.complexity}")
        print(f"   Requirements: {', '.join(task.requirements)}")
        # Stage 1: Analysis
        print(f"\n🔍 Stage 1: ANALYSIS")
        analysis = self.analyze_task(task.id)
        print(f"   Estimated Lines: {analysis['estimated_lines']}")
        print(f"   Patterns: {', '.join(analysis['recommended_patterns'])}")
        # Stage 2: Generation
        print(f"\n⚙️ Stage 2: CODE GENERATION")
        code = self.generate_code(task.id)
        print(f"   ✅ Generated {len(code)} characters of code")
        # Stage 3: Testing
        print(f"\n🧪 Stage 3: TESTING")
        test_results = self.test_code(task.id)
        print(f"   Tests Passed: {test_results['tests_passed']}")
        print(f"   Tests Failed: {test_results['tests_failed']}")
        print(f"   Coverage: {test_results['coverage']:.1f}%")
        # Stage 4: Bug Fixing
        if test_results['tests_failed'] > 0:
            print(f"\n🔧 Stage 4: AUTO-FIXING BUGS")
            self.fix_bugs(task.id)
            print(f"   ✅ All bugs automatically fixed!")
        else:
            print(f"\n✅ Stage 4: No bugs detected!")
        # Stage 5: Deployment
        print(f"\n🚀 Stage 5: DEPLOYMENT")
        deployment = self.deploy_code(task.id)
        print(f"   URL: {deployment['url']}")
        print(f"   Replicas: {deployment['replicas']}")
        print(f"   Status: {deployment['status']}")
        # Stage 6: Monitoring
        print(f"\n📊 Stage 6: MONITORING")
        metrics = self.monitor_and_optimize(task.id)
        print(f"   Response Time: {metrics['response_time']:.0f}ms")
        print(f"   CPU Usage: {metrics['cpu_usage']:.1f}%")
        print(f"   Error Rate: {metrics['error_rate']:.2f}%")
        if metrics.get('optimized'):
            print(f"   🎯 Auto-optimized for better performance!")
        print(f"\n✅ PIPELINE COMPLETE: From idea to production automatically!")
        return {
            "task_id": task.id,
            "code_generated": len(code),
            "tests_passed": test_results['tests_passed'],
            "deployed": task.deployed,
            "performance": metrics
        }
def main():
    """Run autonomous code generation demo"""
    banner = "=" * 70
    print("🤖 AUTONOMOUS CODE GENERATION PIPELINE")
    print(banner)
    print("AI agents that write, test, and deploy code autonomously!")
    print()
    demo_pipeline = AutonomousCodePipeline()
    # Demo task descriptions, each run through the full pipeline in turn.
    demo_specs = (
        "Create a REST API endpoint for user authentication",
        "Implement a caching layer with Redis",
        "Build a WebSocket handler for real-time notifications",
    )
    for spec in demo_specs:
        demo_pipeline.run_full_pipeline(spec)
    # Final stats
    print("\n" + banner)
    print("📊 AUTONOMOUS PIPELINE STATISTICS")
    print(banner)
    totals = demo_pipeline.metrics
    print(f"Total Code Generated: {totals['total_code_generated']} modules")
    print(f"Total Tests Passed: {totals['total_tests_passed']}")
    print(f"Total Deployments: {totals['total_deployments']}")
    print(f"Total Bugs Auto-Fixed: {totals['total_bugs_fixed']}")
    print()
    print("🌌 Fully autonomous development - from idea to production! 🌌")
    print()


if __name__ == '__main__':
    main()

696
scripts/python/codex-scraper.py Executable file
View File

@@ -0,0 +1,696 @@
#!/usr/bin/env python3
"""
BlackRoad Codex - Advanced Scraper
Enhanced scraping capabilities for deeper code analysis and knowledge extraction.
"""
import os
import sqlite3
import json
import re
from pathlib import Path
from typing import Dict, List, Optional, Set
from datetime import datetime
import subprocess
import ast
import hashlib
class AdvancedCodexScraper:
    """Advanced scraping capabilities for the Codex.

    Layers additional analysis tables (documentation, dependency graph,
    design patterns, import/export, test coverage, GitHub metadata) on top
    of an existing Codex SQLite index and fills them per component.
    """

    def __init__(self, codex_path: str = "~/blackroad-codex"):
        """Open the Codex index under codex_path.

        Raises:
            FileNotFoundError: if the components database does not exist.
        """
        self.codex_path = Path(codex_path).expanduser()
        self.db_path = self.codex_path / "index" / "components.db"
        if not self.db_path.exists():
            raise FileNotFoundError(f"Codex not found at {self.db_path}")
        self.init_advanced_tables()

    def init_advanced_tables(self):
        """Initialize additional tables for advanced scraping (idempotent)."""
        conn = sqlite3.connect(self.db_path)
        cursor = conn.cursor()
        # Documentation table
        cursor.execute("""
            CREATE TABLE IF NOT EXISTS documentation (
                id TEXT PRIMARY KEY,
                component_id TEXT,
                doc_type TEXT,  -- docstring, comment, readme, inline
                content TEXT,
                line_number INT,
                FOREIGN KEY (component_id) REFERENCES components(id)
            )
        """)
        # Dependencies graph table
        cursor.execute("""
            CREATE TABLE IF NOT EXISTS dependency_graph (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                source_component_id TEXT,
                target_component_id TEXT,
                dependency_type TEXT,  -- import, inherit, call, compose
                strength INT,  -- 1-10 how strong the dependency
                FOREIGN KEY (source_component_id) REFERENCES components(id),
                FOREIGN KEY (target_component_id) REFERENCES components(id)
            )
        """)
        # Code patterns table
        cursor.execute("""
            CREATE TABLE IF NOT EXISTS code_patterns (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                component_id TEXT,
                pattern_type TEXT,  -- singleton, factory, observer, etc.
                pattern_name TEXT,
                confidence REAL,  -- 0.0-1.0
                evidence TEXT,  -- JSON with supporting evidence
                FOREIGN KEY (component_id) REFERENCES components(id)
            )
        """)
        # Import/Export relationships
        cursor.execute("""
            CREATE TABLE IF NOT EXISTS import_export (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                component_id TEXT,
                relationship_type TEXT,  -- imports, exports, requires
                module_name TEXT,
                is_external BOOLEAN,  -- True if from external package
                FOREIGN KEY (component_id) REFERENCES components(id)
            )
        """)
        # Test coverage
        cursor.execute("""
            CREATE TABLE IF NOT EXISTS test_coverage (
                component_id TEXT PRIMARY KEY,
                has_tests BOOLEAN,
                test_file_path TEXT,
                coverage_percentage REAL,
                test_count INT,
                FOREIGN KEY (component_id) REFERENCES components(id)
            )
        """)
        # GitHub metadata
        cursor.execute("""
            CREATE TABLE IF NOT EXISTS github_repos (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                repo_url TEXT UNIQUE,
                repo_name TEXT,
                clone_path TEXT,
                stars INT,
                forks INT,
                last_updated TIMESTAMP,
                scraped BOOLEAN DEFAULT 0
            )
        """)
        conn.commit()
        conn.close()

    def scrape_documentation(self, component_id: str, file_path: str,
                             start_line: int, end_line: int) -> List[Dict]:
        """Extract all documentation (docstrings, JSDoc, inline comments)
        from the component spanning lines start_line..end_line of file_path.
        """
        docs = []
        try:
            # Explicit encoding with replacement so a stray non-UTF-8 byte in
            # a scraped repo cannot abort the whole extraction.
            with open(file_path, 'r', encoding='utf-8', errors='replace') as f:
                lines = f.readlines()
            component_lines = lines[start_line-1:end_line]
            full_code = ''.join(component_lines)
            # Python docstrings
            if file_path.endswith('.py'):
                docs.extend(self._extract_python_docstrings(
                    component_id, full_code, start_line
                ))
            # TypeScript/JavaScript JSDoc comments
            elif file_path.endswith(('.ts', '.tsx', '.js', '.jsx')):
                docs.extend(self._extract_jsdoc(
                    component_id, full_code, start_line
                ))
            # Inline comments
            docs.extend(self._extract_inline_comments(
                component_id, component_lines, start_line
            ))
        except Exception as e:
            print(f" ⚠️ Error extracting docs from {file_path}: {e}")
        return docs

    def _extract_python_docstrings(self, component_id: str,
                                   code: str, start_line: int) -> List[Dict]:
        """Extract Python docstrings via the ast module.

        Snippet line 1 corresponds to file line start_line, so file
        coordinates are start_line + lineno - 1.
        """
        docs = []
        try:
            tree = ast.parse(code)
            for node in ast.walk(tree):
                if isinstance(node, (ast.FunctionDef, ast.ClassDef, ast.Module)):
                    docstring = ast.get_docstring(node)
                    if docstring:
                        doc_id = hashlib.md5(
                            f"{component_id}:{docstring[:50]}".encode()
                        ).hexdigest()[:16]
                        # ast.Module has no .lineno attribute; previously this
                        # raised AttributeError (swallowed by a bare except),
                        # dropping every docstring whenever the snippet had a
                        # module-level docstring. Fall back to line 1.
                        node_line = getattr(node, 'lineno', 1)
                        docs.append({
                            'id': doc_id,
                            'component_id': component_id,
                            'doc_type': 'docstring',
                            # was start_line + lineno: off by one in file coords
                            'line_number': start_line + node_line - 1,
                            'content': docstring
                        })
        except SyntaxError:
            # Snippet may be a partial/invalid fragment; keep what we found.
            pass
        return docs

    def _extract_jsdoc(self, component_id: str,
                       code: str, start_line: int) -> List[Dict]:
        """Extract JSDoc comments (/** ... */ blocks)."""
        docs = []
        # Pattern: /** ... */
        jsdoc_pattern = r'/\*\*\s*(.*?)\s*\*/'
        matches = re.finditer(jsdoc_pattern, code, re.DOTALL)
        for match in matches:
            content = match.group(1)
            # Clean up leading asterisks on continuation lines
            content = re.sub(r'^\s*\*\s*', '', content, flags=re.MULTILINE)
            doc_id = hashlib.md5(
                f"{component_id}:{content[:50]}".encode()
            ).hexdigest()[:16]
            # Rough line number estimate from preceding newlines
            line_num = code[:match.start()].count('\n') + start_line
            docs.append({
                'id': doc_id,
                'component_id': component_id,
                'doc_type': 'jsdoc',
                'content': content,
                'line_number': line_num
            })
        return docs

    def _extract_inline_comments(self, component_id: str,
                                 lines: List[str], start_line: int) -> List[Dict]:
        """Extract single-line comments (# / //) from component lines.

        NOTE(review): purely textual — a '#' or '//' inside a string literal
        is also picked up; acceptable for a heuristic index.
        """
        docs = []
        for i, line in enumerate(lines):
            # Python/JS/TS single-line comments
            if '#' in line or '//' in line:
                comment = None
                if '#' in line:
                    comment = line.split('#', 1)[1].strip()
                elif '//' in line:
                    comment = line.split('//', 1)[1].strip()
                if comment and len(comment) > 5:  # Meaningful comments only
                    doc_id = hashlib.md5(
                        f"{component_id}:{i}:{comment[:30]}".encode()
                    ).hexdigest()[:16]
                    docs.append({
                        'id': doc_id,
                        'component_id': component_id,
                        'doc_type': 'inline',
                        'content': comment,
                        'line_number': start_line + i
                    })
        return docs

    def build_dependency_graph(self, component_id: str,
                               file_path: str, code: str) -> List[Dict]:
        """Build dependency records for a component, dispatching on language."""
        deps = []
        if file_path.endswith('.py'):
            deps.extend(self._analyze_python_dependencies(component_id, code))
        elif file_path.endswith(('.ts', '.tsx', '.js', '.jsx')):
            deps.extend(self._analyze_typescript_dependencies(component_id, code))
        return deps

    def _analyze_python_dependencies(self, component_id: str, code: str) -> List[Dict]:
        """Analyze Python imports and class inheritance via the ast module."""
        deps = []
        try:
            tree = ast.parse(code)
            for node in ast.walk(tree):
                # import X [, Y]
                if isinstance(node, ast.Import):
                    for alias in node.names:
                        deps.append({
                            'source_component_id': component_id,
                            'target_component_id': None,  # resolved later
                            'dependency_type': 'import',
                            'module_name': alias.name,
                            'strength': 5
                        })
                # from X import Y
                elif isinstance(node, ast.ImportFrom):
                    if node.module:
                        deps.append({
                            'source_component_id': component_id,
                            'target_component_id': None,
                            'dependency_type': 'import',
                            'module_name': node.module,
                            'strength': 5
                        })
                # class C(Base): — inheritance is a stronger coupling
                elif isinstance(node, ast.ClassDef):
                    for base in node.bases:
                        if isinstance(base, ast.Name):
                            deps.append({
                                'source_component_id': component_id,
                                'target_component_id': None,
                                'dependency_type': 'inherit',
                                'module_name': base.id,
                                'strength': 8
                            })
        except SyntaxError:
            # Unparseable fragment: report whatever was gathered so far.
            pass
        return deps

    def _analyze_typescript_dependencies(self, component_id: str, code: str) -> List[Dict]:
        """Analyze TypeScript/JavaScript import/require dependencies."""
        deps = []
        # import X from 'Y'
        import_pattern = r'import\s+.*?\s+from\s+[\'"]([^\'"]+)[\'"]'
        matches = re.finditer(import_pattern, code)
        for match in matches:
            module_name = match.group(1)
            deps.append({
                'source_component_id': component_id,
                'target_component_id': None,
                'dependency_type': 'import',
                'module_name': module_name,
                'strength': 5
            })
        # require('X')
        require_pattern = r'require\([\'"]([^\'"]+)[\'"]\)'
        matches = re.finditer(require_pattern, code)
        for match in matches:
            module_name = match.group(1)
            deps.append({
                'source_component_id': component_id,
                'target_component_id': None,
                'dependency_type': 'require',
                'module_name': module_name,
                'strength': 5
            })
        return deps

    def detect_patterns(self, component_id: str, code: str,
                        component_type: str) -> List[Dict]:
        """Detect design patterns via keyword heuristics (confidence < 1.0)."""
        patterns = []
        # Singleton pattern
        if self._is_singleton(code, component_type):
            patterns.append({
                'component_id': component_id,
                'pattern_type': 'creational',
                'pattern_name': 'singleton',
                'confidence': 0.8,
                'evidence': json.dumps({'reason': 'Single instance pattern detected'})
            })
        # Factory pattern
        if self._is_factory(code, component_type):
            patterns.append({
                'component_id': component_id,
                'pattern_type': 'creational',
                'pattern_name': 'factory',
                'confidence': 0.7,
                'evidence': json.dumps({'reason': 'Factory method pattern detected'})
            })
        # Observer pattern
        if self._is_observer(code):
            patterns.append({
                'component_id': component_id,
                'pattern_type': 'behavioral',
                'pattern_name': 'observer',
                'confidence': 0.75,
                'evidence': json.dumps({'reason': 'Event/listener pattern detected'})
            })
        return patterns

    def _is_singleton(self, code: str, component_type: str) -> bool:
        """Heuristic: classes with >= 2 singleton indicators."""
        if component_type != 'class':
            return False
        # Simple keyword heuristics
        singleton_indicators = [
            'instance' in code.lower(),
            '__new__' in code,
            'getInstance' in code,
            '_instance' in code
        ]
        return sum(singleton_indicators) >= 2

    def _is_factory(self, code: str, component_type: str) -> bool:
        """Heuristic: >= 2 creation-verb indicators suggest a factory."""
        factory_indicators = [
            'create' in code.lower(),
            'factory' in code.lower(),
            'build' in code.lower(),
            'make' in code.lower()
        ]
        return sum(factory_indicators) >= 2

    def _is_observer(self, code: str) -> bool:
        """Heuristic: >= 2 event/listener indicators suggest observer."""
        observer_indicators = [
            'subscribe' in code.lower(),
            'notify' in code.lower(),
            'listener' in code.lower(),
            'observer' in code.lower(),
            'addEventListener' in code,
            'on(' in code or 'emit(' in code
        ]
        return sum(observer_indicators) >= 2

    def find_tests(self, component_id: str, component_name: str,
                   repo_path: str) -> Optional[Dict]:
        """Look for a test file matching common naming conventions.

        Always returns a coverage record; has_tests is False when nothing
        matched.
        """
        repo = Path(repo_path)
        # Common test file patterns
        test_patterns = [
            f"test_{component_name}.py",
            f"{component_name}.test.ts",
            f"{component_name}.test.js",
            f"{component_name}.spec.ts",
            f"{component_name}.spec.js",
            f"test_{component_name}.ts",
        ]
        # Common test directories
        test_dirs = ['tests', 'test', '__tests__', 'spec']
        for test_dir in test_dirs:
            test_path = repo / test_dir
            if test_path.exists():
                for pattern in test_patterns:
                    test_file = test_path / pattern
                    if test_file.exists():
                        # Count tests
                        test_count = self._count_tests(test_file)
                        return {
                            'component_id': component_id,
                            'has_tests': True,
                            'test_file_path': str(test_file),
                            'coverage_percentage': None,  # needs a coverage tool
                            'test_count': test_count
                        }
        return {
            'component_id': component_id,
            'has_tests': False,
            'test_file_path': None,
            'coverage_percentage': None,
            'test_count': 0
        }

    def _count_tests(self, test_file: Path) -> int:
        """Count test definitions in a test file; 0 on read failure."""
        try:
            content = test_file.read_text(encoding='utf-8', errors='replace')
            # Python: def test_* or class Test*
            if test_file.suffix == '.py':
                count = len(re.findall(r'def test_\w+', content))
                count += len(re.findall(r'class Test\w+', content))
                return count
            # TypeScript/JavaScript: test( or it(
            elif test_file.suffix in ['.ts', '.js']:
                count = len(re.findall(r'\b(test|it)\s*\(', content))
                return count
        except OSError:
            pass
        return 0

    def scrape_github_repo(self, repo_url: str, clone_to: str = "~/codex-github-repos") -> bool:
        """Clone a GitHub repository and record its metadata.

        Returns True if the repo is present locally afterwards.
        """
        clone_dir = Path(clone_to).expanduser()
        clone_dir.mkdir(parents=True, exist_ok=True)
        # Extract repo name from URL
        repo_name = repo_url.rstrip('/').split('/')[-1].replace('.git', '')
        repo_path = clone_dir / repo_name
        print(f"📦 Cloning {repo_name} from GitHub...")
        try:
            # Clone if not already present
            if not repo_path.exists():
                result = subprocess.run(
                    ['git', 'clone', repo_url, str(repo_path)],
                    capture_output=True,
                    text=True,
                    timeout=300
                )
                if result.returncode != 0:
                    print(f" ❌ Failed to clone: {result.stderr}")
                    return False
            # Get GitHub stats
            stats = self._get_github_stats(repo_url)
            # Save repo metadata
            self._save_github_repo(repo_url, repo_name, str(repo_path), stats)
            print(f" ✅ Cloned successfully")
            return True
        except Exception as e:
            print(f" ❌ Error cloning repo: {e}")
            return False

    def _get_github_stats(self, repo_url: str) -> Dict:
        """Get GitHub repository statistics.

        Placeholder: would call the GitHub API; returns zeros for now.
        """
        return {
            'stars': 0,
            'forks': 0,
            'last_updated': datetime.now().isoformat()
        }

    def _save_github_repo(self, url: str, name: str, path: str, stats: Dict):
        """Upsert GitHub repo metadata (repo_url is UNIQUE)."""
        conn = sqlite3.connect(self.db_path)
        cursor = conn.cursor()
        cursor.execute("""
            INSERT OR REPLACE INTO github_repos (repo_url, repo_name, clone_path, stars, forks, last_updated)
            VALUES (?, ?, ?, ?, ?, ?)
        """, (url, name, path, stats['stars'], stats['forks'], stats['last_updated']))
        conn.commit()
        conn.close()

    def deep_scrape_component(self, component_id: str):
        """Run all analyses (docs, deps, patterns, tests) on one component."""
        # Get component info
        conn = sqlite3.connect(self.db_path)
        conn.row_factory = sqlite3.Row
        cursor = conn.cursor()
        cursor.execute("SELECT * FROM components WHERE id = ?", (component_id,))
        row = cursor.fetchone()
        conn.close()
        if row is None:
            # Previously dict(None) raised TypeError on an unknown id.
            print(f" ⚠️ Component not found: {component_id}")
            return
        comp = dict(row)
        print(f"🔍 Deep scraping: {comp['name']}")
        # Extract code
        file_path = Path(comp['file_path'])
        if not file_path.exists():
            print(f" ⚠️ File not found: {file_path}")
            return
        with open(file_path, encoding='utf-8', errors='replace') as f:
            lines = f.readlines()
        code = ''.join(lines[comp['start_line']-1:comp['end_line']])
        # 1. Documentation
        docs = self.scrape_documentation(
            component_id, str(file_path),
            comp['start_line'], comp['end_line']
        )
        self._save_documentation(docs)
        print(f" 📖 Extracted {len(docs)} documentation entries")
        # 2. Dependencies
        deps = self.build_dependency_graph(component_id, str(file_path), code)
        self._save_dependencies(deps)
        print(f" 🔗 Found {len(deps)} dependencies")
        # 3. Patterns
        patterns = self.detect_patterns(component_id, code, comp['type'])
        self._save_patterns(patterns)
        print(f" 🎨 Detected {len(patterns)} patterns")
        # 4. Tests
        repo_path = str(file_path.parent)
        test_info = self.find_tests(component_id, comp['name'], repo_path)
        if test_info:
            self._save_test_coverage(test_info)
            if test_info['has_tests']:
                print(f" ✅ Tests found: {test_info['test_count']} tests")
            else:
                print(f" ⚠️ No tests found")

    def _save_documentation(self, docs: List[Dict]):
        """Upsert extracted documentation rows."""
        if not docs:
            return
        conn = sqlite3.connect(self.db_path)
        cursor = conn.cursor()
        for doc in docs:
            cursor.execute("""
                INSERT OR REPLACE INTO documentation (id, component_id, doc_type, content, line_number)
                VALUES (?, ?, ?, ?, ?)
            """, (doc['id'], doc['component_id'], doc['doc_type'],
                  doc['content'], doc['line_number']))
        conn.commit()
        conn.close()

    def _save_dependencies(self, deps: List[Dict]):
        """Insert dependency edges.

        NOTE(review): dep dicts carry a 'module_name' that the
        dependency_graph table has no column for, so it is dropped here —
        confirm whether the schema should be extended.
        """
        if not deps:
            return
        conn = sqlite3.connect(self.db_path)
        cursor = conn.cursor()
        for dep in deps:
            cursor.execute("""
                INSERT INTO dependency_graph (source_component_id, target_component_id, dependency_type, strength)
                VALUES (?, ?, ?, ?)
            """, (dep['source_component_id'], dep['target_component_id'],
                  dep.get('dependency_type', 'import'), dep.get('strength', 5)))
        conn.commit()
        conn.close()

    def _save_patterns(self, patterns: List[Dict]):
        """Insert detected design-pattern rows."""
        if not patterns:
            return
        conn = sqlite3.connect(self.db_path)
        cursor = conn.cursor()
        for pattern in patterns:
            cursor.execute("""
                INSERT INTO code_patterns (component_id, pattern_type, pattern_name, confidence, evidence)
                VALUES (?, ?, ?, ?, ?)
            """, (pattern['component_id'], pattern['pattern_type'],
                  pattern['pattern_name'], pattern['confidence'], pattern['evidence']))
        conn.commit()
        conn.close()

    def _save_test_coverage(self, test_info: Dict):
        """Upsert per-component test coverage info."""
        conn = sqlite3.connect(self.db_path)
        cursor = conn.cursor()
        cursor.execute("""
            INSERT OR REPLACE INTO test_coverage (component_id, has_tests, test_file_path, coverage_percentage, test_count)
            VALUES (?, ?, ?, ?, ?)
        """, (test_info['component_id'], test_info['has_tests'],
              test_info['test_file_path'], test_info['coverage_percentage'],
              test_info['test_count']))
        conn.commit()
        conn.close()
def main():
    """CLI interface for advanced scraping."""
    import argparse

    parser = argparse.ArgumentParser(description='Advanced Codex Scraper')
    parser.add_argument('--codex', default='~/blackroad-codex', help='Codex path')
    parser.add_argument('--deep-scrape', help='Deep scrape component by ID')
    parser.add_argument('--clone-github', help='Clone and scrape GitHub repo URL')
    parser.add_argument('--scrape-all', action='store_true', help='Deep scrape all components')
    parser.add_argument('--limit', type=int, help='Limit number of components to scrape')
    opts = parser.parse_args()

    tool = AdvancedCodexScraper(opts.codex)
    if opts.clone_github:
        tool.scrape_github_repo(opts.clone_github)
    elif opts.deep_scrape:
        tool.deep_scrape_component(opts.deep_scrape)
    elif opts.scrape_all:
        # Collect all component ids up front, then scrape each in turn.
        conn = sqlite3.connect(tool.db_path)
        cursor = conn.cursor()
        limit_clause = f"LIMIT {opts.limit}" if opts.limit else ""
        cursor.execute(f"SELECT id FROM components {limit_clause}")
        targets = [row[0] for row in cursor.fetchall()]
        conn.close()
        total = len(targets)
        print(f"🔍 Deep scraping {total} components...\n")
        for index, comp_id in enumerate(targets, 1):
            print(f"[{index}/{total}]", end=" ")
            tool.deep_scrape_component(comp_id)
            print()
        print(f"\n✅ Deep scraping complete!")
    else:
        parser.print_help()


if __name__ == '__main__':
    main()

619
scripts/python/codex-symbolic.py Executable file
View File

@@ -0,0 +1,619 @@
#!/usr/bin/env python3
"""
BlackRoad Codex - Symbolic Computation Engine
Symbolic math, algebraic manipulation, and equation solving for code analysis.
"""
import sqlite3
import re
import hashlib
from pathlib import Path
from typing import Dict, List, Optional, Set, Tuple, Any
from dataclasses import dataclass
from enum import Enum
import json
class SymbolicDomain(Enum):
    """Mathematical domains for symbolic computation.

    Used to tag expressions; domain inference falls back to ALGEBRA when
    no domain-specific markers are recognized.
    """

    ALGEBRA = "algebra"
    CALCULUS = "calculus"
    LINEAR_ALGEBRA = "linear_algebra"
    DIFFERENTIAL_EQUATIONS = "differential_equations"
    DISCRETE_MATH = "discrete_math"
    NUMBER_THEORY = "number_theory"
    GEOMETRY = "geometry"
    LOGIC = "logic"
@dataclass
class SymbolicExpression:
    """Representation of a symbolic expression.

    Built by SymbolicComputationEngine.parse_expression; all fields are
    derived from the raw expression text.
    """

    expr_id: str  # md5-based id (16 hex chars) over component id + normalized form
    original: str  # expression text exactly as found in the source
    normalized: str  # canonical form ('**' rewritten to '^')
    domain: SymbolicDomain  # inferred mathematical domain
    variables: Set[str]  # free identifiers, excluding known functions/constants
    operators: List[str]  # operators in order of appearance
    properties: Dict[str, bool]  # commutative, associative, distributive, etc.
    simplified: Optional[str] = None  # simplified form, or None if no rule applied
    latex: Optional[str] = None  # LaTeX rendering of the normalized form
@dataclass
class EquationSystem:
    """System of equations extracted from code."""

    system_id: str  # md5-based id over the joined equation strings
    equations: List[str]  # equations as "lhs = rhs" strings
    variables: Set[str]  # all variable names appearing across the system
    constraints: List[str]  # extra constraints (extraction produces an empty list)
    solution_method: Optional[str] = None  # not set at extraction time
    solved: bool = False  # True once solutions have been computed
    solutions: Optional[Dict[str, str]] = None  # variable -> solution expression
class SymbolicComputationEngine:
"""Symbolic computation engine for the Codex."""
def __init__(self, codex_path: str = "~/blackroad-codex"):
self.codex_path = Path(codex_path).expanduser()
self.db_path = self.codex_path / "index" / "components.db"
if not self.db_path.exists():
raise FileNotFoundError(f"Codex not found at {self.db_path}")
self.init_symbolic_tables()
# Operator precedence
self.precedence = {
'**': 4, '^': 4,
'*': 3, '/': 3, '%': 3,
'+': 2, '-': 2,
'==': 1, '!=': 1, '<': 1, '>': 1, '<=': 1, '>=': 1
}
def init_symbolic_tables(self):
"""Initialize database tables for symbolic computation."""
conn = sqlite3.connect(self.db_path)
cursor = conn.cursor()
# Symbolic expressions table (enhanced version of existing table)
# Check if table exists with old schema
cursor.execute("""
SELECT name FROM sqlite_master
WHERE type='table' AND name='symbolic_expressions'
""")
if not cursor.fetchone():
# Create new table
cursor.execute("""
CREATE TABLE IF NOT EXISTS symbolic_expressions (
id TEXT PRIMARY KEY,
component_id TEXT,
original TEXT,
normalized TEXT,
domain TEXT,
variables TEXT, -- JSON array
operators TEXT, -- JSON array
properties TEXT, -- JSON object
simplified TEXT,
latex TEXT,
FOREIGN KEY (component_id) REFERENCES components(id)
)
""")
else:
# Add missing columns if table exists
try:
cursor.execute("ALTER TABLE symbolic_expressions ADD COLUMN original TEXT")
except:
pass
try:
cursor.execute("ALTER TABLE symbolic_expressions ADD COLUMN normalized TEXT")
except:
pass
try:
cursor.execute("ALTER TABLE symbolic_expressions ADD COLUMN operators TEXT")
except:
pass
try:
cursor.execute("ALTER TABLE symbolic_expressions ADD COLUMN latex TEXT")
except:
pass
# Equation systems table
cursor.execute("""
CREATE TABLE IF NOT EXISTS equation_systems (
id TEXT PRIMARY KEY,
component_id TEXT,
equations TEXT, -- JSON array
variables TEXT, -- JSON array
constraints TEXT, -- JSON array
solution_method TEXT,
solved BOOLEAN DEFAULT 0,
solutions TEXT, -- JSON object
FOREIGN KEY (component_id) REFERENCES components(id)
)
""")
# Mathematical identities table
cursor.execute("""
CREATE TABLE IF NOT EXISTS math_identities (
id INTEGER PRIMARY KEY AUTOINCREMENT,
name TEXT,
lhs TEXT,
rhs TEXT,
domain TEXT,
conditions TEXT -- JSON (when identity holds)
)
""")
# Transformation rules table
cursor.execute("""
CREATE TABLE IF NOT EXISTS transformation_rules (
id INTEGER PRIMARY KEY AUTOINCREMENT,
rule_name TEXT,
pattern TEXT,
replacement TEXT,
domain TEXT,
reversible BOOLEAN
)
""")
# Populate standard identities
self._populate_standard_identities(cursor)
conn.commit()
conn.close()
def _populate_standard_identities(self, cursor):
"""Populate database with standard mathematical identities."""
identities = [
# Algebra
("Commutative (add)", "a + b", "b + a", "algebra", "{}"),
("Commutative (mult)", "a * b", "b * a", "algebra", "{}"),
("Associative (add)", "(a + b) + c", "a + (b + c)", "algebra", "{}"),
("Associative (mult)", "(a * b) * c", "a * (b * c)", "algebra", "{}"),
("Distributive", "a * (b + c)", "a * b + a * c", "algebra", "{}"),
("Identity (add)", "a + 0", "a", "algebra", "{}"),
("Identity (mult)", "a * 1", "a", "algebra", "{}"),
("Zero property", "a * 0", "0", "algebra", "{}"),
# Exponents
("Product rule", "a^m * a^n", "a^(m+n)", "algebra", "{}"),
("Quotient rule", "a^m / a^n", "a^(m-n)", "algebra", '{"a": "!= 0"}'),
("Power rule", "(a^m)^n", "a^(m*n)", "algebra", "{}"),
# Trigonometry
("Pythagorean", "sin(x)^2 + cos(x)^2", "1", "calculus", "{}"),
("Double angle (sin)", "sin(2*x)", "2*sin(x)*cos(x)", "calculus", "{}"),
("Double angle (cos)", "cos(2*x)", "cos(x)^2 - sin(x)^2", "calculus", "{}"),
# Calculus
("Power rule (deriv)", "d/dx(x^n)", "n*x^(n-1)", "calculus", "{}"),
("Sum rule (deriv)", "d/dx(f + g)", "d/dx(f) + d/dx(g)", "calculus", "{}"),
("Product rule (deriv)", "d/dx(f*g)", "f*d/dx(g) + g*d/dx(f)", "calculus", "{}"),
# Logic
("De Morgan 1", "not (A and B)", "(not A) or (not B)", "logic", "{}"),
("De Morgan 2", "not (A or B)", "(not A) and (not B)", "logic", "{}"),
]
for name, lhs, rhs, domain, conditions in identities:
try:
cursor.execute("""
INSERT OR IGNORE INTO math_identities (name, lhs, rhs, domain, conditions)
VALUES (?, ?, ?, ?, ?)
""", (name, lhs, rhs, domain, conditions))
except:
pass
# ========== EXPRESSION PARSING & NORMALIZATION ==========
def parse_expression(self, expr: str, component_id: str) -> SymbolicExpression:
"""Parse and analyze a symbolic expression."""
# Normalize whitespace
expr = ' '.join(expr.split())
# Extract variables
variables = self._extract_variables(expr)
# Extract operators
operators = self._extract_operators(expr)
# Infer domain
domain = self._infer_symbolic_domain(expr, variables, operators)
# Normalize expression
normalized = self._normalize_expression(expr)
# Detect properties
properties = self._detect_properties(expr, variables, operators)
# Generate expression ID
expr_id = hashlib.md5(f"{component_id}:{normalized}".encode()).hexdigest()[:16]
# Try to simplify
simplified = self._simplify_expression(normalized)
# Generate LaTeX
latex = self._to_latex(normalized)
return SymbolicExpression(
expr_id=expr_id,
original=expr,
normalized=normalized,
domain=domain,
variables=variables,
operators=operators,
properties=properties,
simplified=simplified,
latex=latex
)
def _extract_variables(self, expr: str) -> Set[str]:
"""Extract variable names from expression."""
# Match identifiers (letters followed by optional letters/numbers/underscores)
pattern = r'\b[a-zA-Z_][a-zA-Z0-9_]*\b'
matches = re.findall(pattern, expr)
# Filter out known functions and constants
reserved = {'sin', 'cos', 'tan', 'exp', 'log', 'sqrt', 'abs',
'min', 'max', 'sum', 'pi', 'e', 'True', 'False',
'and', 'or', 'not', 'if', 'else', 'return'}
variables = {m for m in matches if m not in reserved}
return variables
def _extract_operators(self, expr: str) -> List[str]:
"""Extract mathematical operators from expression."""
operators = []
op_pattern = r'[\+\-\*/\^%]|==|!=|<=|>=|<|>|\*\*'
for match in re.finditer(op_pattern, expr):
operators.append(match.group())
return operators
def _infer_symbolic_domain(self, expr: str, variables: Set[str],
operators: List[str]) -> SymbolicDomain:
"""Infer the mathematical domain of an expression."""
expr_lower = expr.lower()
# Check for specific function names
if any(f in expr_lower for f in ['sin', 'cos', 'tan', 'arcsin', 'arccos']):
return SymbolicDomain.CALCULUS
if any(f in expr_lower for f in ['matrix', 'dot', 'cross', 'transpose', 'det']):
return SymbolicDomain.LINEAR_ALGEBRA
if any(f in expr_lower for f in ['derivative', 'integral', 'limit', 'dx', 'dy']):
return SymbolicDomain.CALCULUS
if any(f in expr_lower for f in ['gcd', 'lcm', 'mod', 'prime', 'factor']):
return SymbolicDomain.NUMBER_THEORY
if any(f in expr_lower for f in ['and', 'or', 'not', 'implies', 'iff']):
return SymbolicDomain.LOGIC
# Default to algebra
return SymbolicDomain.ALGEBRA
def _normalize_expression(self, expr: str) -> str:
"""Normalize expression to canonical form."""
# Convert ** to ^
expr = expr.replace('**', '^')
# Sort commutative operations (simple version)
# Real implementation would parse into AST and normalize properly
# Remove redundant parentheses (simplified)
# Real implementation would use proper parsing
return expr
def _detect_properties(self, expr: str, variables: Set[str],
operators: List[str]) -> Dict[str, bool]:
"""Detect mathematical properties of an expression."""
properties = {}
# Check if expression only uses commutative operators
commutative_ops = {'+', '*', 'and', 'or'}
properties['uses_only_commutative'] = all(
op in commutative_ops for op in operators
)
# Check if expression is linear in all variables
# (simplified check - real implementation would be more sophisticated)
properties['possibly_linear'] = '^' not in operators and '**' not in expr
# Check if expression is polynomial
properties['possibly_polynomial'] = not any(
f in expr.lower() for f in ['sin', 'cos', 'tan', 'log', 'exp']
)
return properties
def _simplify_expression(self, expr: str) -> Optional[str]:
"""Apply simplification rules to expression."""
simplified = expr
# Apply basic simplifications
simplifications = [
(r'\b(\w+)\s*\+\s*0\b', r'\1'), # x + 0 = x
(r'\b0\s*\+\s*(\w+)\b', r'\1'), # 0 + x = x
(r'\b(\w+)\s*\*\s*1\b', r'\1'), # x * 1 = x
(r'\b1\s*\*\s*(\w+)\b', r'\1'), # 1 * x = x
(r'\b(\w+)\s*\*\s*0\b', '0'), # x * 0 = 0
(r'\b0\s*\*\s*(\w+)\b', '0'), # 0 * x = 0
(r'\b(\w+)\s*-\s*\1\b', '0'), # x - x = 0
]
for pattern, replacement in simplifications:
simplified = re.sub(pattern, replacement, simplified)
return simplified if simplified != expr else None
def _to_latex(self, expr: str) -> str:
"""Convert expression to LaTeX notation."""
latex = expr
# Convert operators
latex = latex.replace('^', '^') # Already LaTeX
latex = latex.replace('*', r' \cdot ')
latex = latex.replace('sqrt', r'\sqrt')
latex = latex.replace('sin', r'\sin')
latex = latex.replace('cos', r'\cos')
latex = latex.replace('tan', r'\tan')
return latex
# ========== EQUATION SYSTEMS ==========
def extract_equation_system(self, code: str, component_id: str) -> List[EquationSystem]:
"""Extract systems of equations from code."""
systems = []
# Look for patterns like:
# y = mx + b
# z = ax + by + c
equation_pattern = r'(\w+)\s*=\s*([^;]+)'
equations = []
all_vars = set()
for match in re.finditer(equation_pattern, code):
lhs = match.group(1).strip()
rhs = match.group(2).strip()
# Filter out simple assignments (constants, function calls, etc.)
if self._is_mathematical_equation(rhs):
equation = f"{lhs} = {rhs}"
equations.append(equation)
# Extract variables
vars_in_rhs = self._extract_variables(rhs)
all_vars.update(vars_in_rhs)
all_vars.add(lhs)
if equations:
system_id = hashlib.md5(
f"{component_id}:{'|'.join(equations)}".encode()
).hexdigest()[:16]
system = EquationSystem(
system_id=system_id,
equations=equations,
variables=all_vars,
constraints=[],
solution_method=None,
solved=False,
solutions=None
)
systems.append(system)
return systems
def _is_mathematical_equation(self, expr: str) -> bool:
"""Check if expression looks like a mathematical equation."""
# Has mathematical operators
if any(op in expr for op in ['+', '-', '*', '/', '^', '**']):
# Not just a function call or string literal
if not (expr.strip().startswith('f"') or expr.strip().startswith('"')):
return True
return False
# ========== ALGEBRAIC MANIPULATION ==========
def apply_identity(self, expr: str, identity_name: str) -> Optional[str]:
"""Apply a mathematical identity to transform an expression."""
conn = sqlite3.connect(self.db_path)
cursor = conn.cursor()
cursor.execute("""
SELECT lhs, rhs FROM math_identities WHERE name = ?
""", (identity_name,))
result = cursor.fetchone()
conn.close()
if result:
lhs_pattern, rhs_pattern = result
# Simple pattern matching (real implementation would use symbolic matching)
if lhs_pattern in expr:
return expr.replace(lhs_pattern, rhs_pattern)
return None
def find_applicable_identities(self, expr: str) -> List[Tuple[str, str, str]]:
"""Find mathematical identities applicable to an expression."""
conn = sqlite3.connect(self.db_path)
cursor = conn.cursor()
cursor.execute("SELECT name, lhs, rhs FROM math_identities")
all_identities = cursor.fetchall()
conn.close()
applicable = []
for name, lhs, rhs in all_identities:
# Simple substring matching (real implementation would use pattern matching)
if self._pattern_matches(lhs, expr):
applicable.append((name, lhs, rhs))
return applicable
def _pattern_matches(self, pattern: str, expr: str) -> bool:
"""Check if a pattern matches an expression."""
# Simplified pattern matching
# Real implementation would parse both and do structural matching
pattern_ops = set(self._extract_operators(pattern))
expr_ops = set(self._extract_operators(expr))
return pattern_ops.issubset(expr_ops)
# ========== DATABASE OPERATIONS ==========
def save_expression(self, expr: SymbolicExpression, component_id: str):
"""Save symbolic expression to database."""
conn = sqlite3.connect(self.db_path)
cursor = conn.cursor()
# Use existing column names from advanced scraper
cursor.execute("""
INSERT OR REPLACE INTO symbolic_expressions
(id, component_id, expression, normalized, domain, properties, simplified, latex, operators, original)
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
""", (
expr.expr_id,
component_id,
expr.original, # expression column
expr.normalized,
expr.domain.value,
json.dumps(expr.properties),
expr.simplified,
expr.latex,
json.dumps(expr.operators),
expr.original # original column
))
conn.commit()
conn.close()
def save_equation_system(self, system: EquationSystem, component_id: str):
"""Save equation system to database."""
conn = sqlite3.connect(self.db_path)
cursor = conn.cursor()
cursor.execute("""
INSERT OR REPLACE INTO equation_systems
(id, component_id, equations, variables, constraints, solution_method, solved, solutions)
VALUES (?, ?, ?, ?, ?, ?, ?, ?)
""", (
system.system_id,
component_id,
json.dumps(system.equations),
json.dumps(list(system.variables)),
json.dumps(system.constraints),
system.solution_method,
system.solved,
json.dumps(system.solutions) if system.solutions else None
))
conn.commit()
conn.close()
# ========== ANALYSIS & REPORTING ==========
    def analyze_mathematical_code(self, component_id: str, code: str):
        """Full symbolic analysis of code.

        Extracts and persists equation systems, then parses and persists any
        mathematical return-expressions found in `code`.

        Returns:
            dict with counts: {'systems': int, 'expressions': int}.
        """
        print(f"  🔬 Symbolic analysis: {component_id}")
        # Extract equation systems
        systems = self.extract_equation_system(code, component_id)
        for system in systems:
            self.save_equation_system(system, component_id)
        print(f"    ✅ Found {len(systems)} equation systems")
        # Find expressions to analyze (simplified - would parse properly)
        # NOTE(review): [^;] also matches newlines, so without a semicolon this
        # can capture far past the return statement — confirm intended.
        expr_pattern = r'return\s+([^;]+)'
        expressions = []
        for match in re.finditer(expr_pattern, code):
            expr_str = match.group(1).strip()
            if self._is_mathematical_equation(expr_str):
                expr = self.parse_expression(expr_str, component_id)
                self.save_expression(expr, component_id)
                expressions.append(expr)
        print(f"    ✅ Analyzed {len(expressions)} expressions")
        return {
            'systems': len(systems),
            'expressions': len(expressions)
        }
    def get_symbolic_summary(self) -> Dict:
        """Get summary of symbolic analysis.

        Returns:
            dict with keys 'expressions_by_domain' (domain -> count),
            'total_equation_systems' (int), 'available_identities' (int).
        """
        conn = sqlite3.connect(self.db_path)
        cursor = conn.cursor()
        summary = {}
        # Count expressions by domain
        cursor.execute("""
            SELECT domain, COUNT(*) FROM symbolic_expressions GROUP BY domain
        """)
        summary['expressions_by_domain'] = dict(cursor.fetchall())
        # Count equation systems
        cursor.execute("SELECT COUNT(*) FROM equation_systems")
        summary['total_equation_systems'] = cursor.fetchone()[0]
        # Count identities
        cursor.execute("SELECT COUNT(*) FROM math_identities")
        summary['available_identities'] = cursor.fetchone()[0]
        conn.close()
        return summary
def main():
    """CLI interface.

    Modes (mutually exclusive, checked in order):
      --summary               print aggregate analysis counts
      --identities            list stored mathematical identities
      --analyze ID --file F   run symbolic analysis of F as component ID
    Note: if no mode flag is given (or --analyze without --file), the
    script exits silently after opening the engine.
    """
    import argparse
    parser = argparse.ArgumentParser(description='Codex Symbolic Computation')
    parser.add_argument('--codex', default='~/blackroad-codex', help='Codex path')
    parser.add_argument('--analyze', help='Component ID to analyze')
    parser.add_argument('--file', help='File containing code')
    parser.add_argument('--summary', action='store_true', help='Show summary')
    parser.add_argument('--identities', action='store_true', help='List identities')
    args = parser.parse_args()
    # Raises FileNotFoundError if the codex database does not exist yet.
    engine = SymbolicComputationEngine(args.codex)
    if args.summary:
        summary = engine.get_symbolic_summary()
        print("\n🔬 SYMBOLIC COMPUTATION SUMMARY\n")
        print("Expressions by domain:")
        for domain, count in summary['expressions_by_domain'].items():
            print(f"  {domain}: {count}")
        print(f"\nTotal equation systems: {summary['total_equation_systems']}")
        print(f"Available identities: {summary['available_identities']}")
    elif args.identities:
        conn = sqlite3.connect(engine.db_path)
        cursor = conn.cursor()
        cursor.execute("SELECT name, lhs, rhs, domain FROM math_identities")
        print("\n📐 MATHEMATICAL IDENTITIES\n")
        for name, lhs, rhs, domain in cursor.fetchall():
            print(f"{name:30} ({domain})")
            print(f"  {lhs} = {rhs}")
        conn.close()
    elif args.analyze and args.file:
        with open(args.file, 'r') as f:
            code = f.read()
        result = engine.analyze_mathematical_code(args.analyze, code)
        print(f"\n✅ Analysis complete:")
        print(f"  Equation systems: {result['systems']}")
        print(f"  Expressions: {result['expressions']}")
# Allow running this analyzer directly as a CLI script.
if __name__ == '__main__':
    main()

View File

@@ -0,0 +1,585 @@
#!/usr/bin/env python3
"""
BlackRoad Codex - Verification & Calculation Framework
Mechanical calculating, symbolic computation, and formal verification for code analysis.
"""
import sqlite3
import ast
import re
import hashlib
from pathlib import Path
from typing import Dict, List, Optional, Any, Tuple
from dataclasses import dataclass
from enum import Enum
import json
class VerificationType(Enum):
    """Types of verification checks.

    The string values are stored in the `verifications.verification_type`
    column, so they must remain stable across releases.
    """
    SYMBOLIC_COMPUTATION = "symbolic"
    TYPE_CHECKING = "type_check"
    INVARIANT = "invariant"
    PRECONDITION = "precondition"
    POSTCONDITION = "postcondition"
    PROPERTY_TEST = "property"
    PROOF = "proof"
    ASSERTION = "assertion"
@dataclass
class VerificationResult:
    """Result of a verification check."""
    verification_type: VerificationType
    component_id: str
    passed: bool
    # Check-specific details (e.g. error lists, function counts) — JSON-encoded
    # when persisted to the `verifications` table.
    evidence: Dict[str, Any]
    confidence: float  # 0.0-1.0
    # Human-readable one-line summary of the outcome.
    message: str
@dataclass
class Calculation:
    """A mechanical calculation extracted from code."""
    # Stable 16-hex-char id derived from component_id + expression.
    calc_id: str
    component_id: str
    calc_type: str  # equation, formula, algorithm, transformation
    # Source text of the expression (ast.unparse output).
    expression: str
    variables: List[str]
    constants: List[str]
    domain: Optional[str]  # mathematical domain (algebra, calculus, etc.)
    verified: bool
    verification_method: Optional[str]
class CodexVerificationFramework:
    """Mechanical calculation and verification framework for the Codex."""
    def __init__(self, codex_path: str = "~/blackroad-codex"):
        """Open the Codex component database and ensure verification tables exist.

        Raises:
            FileNotFoundError: if the index database is missing at
                <codex_path>/index/components.db.
        """
        self.codex_path = Path(codex_path).expanduser()
        self.db_path = self.codex_path / "index" / "components.db"
        if not self.db_path.exists():
            raise FileNotFoundError(f"Codex not found at {self.db_path}")
        self.init_verification_tables()
    def init_verification_tables(self):
        """Initialize database tables for verification framework.

        Creates (IF NOT EXISTS, so safe to call repeatedly) the six tables
        used by this framework: calculations, verifications, type_signatures,
        invariants, symbolic_expressions, and proofs.  All carry a foreign
        key to components(id).
        """
        conn = sqlite3.connect(self.db_path)
        cursor = conn.cursor()
        # Mechanical calculations table
        cursor.execute("""
            CREATE TABLE IF NOT EXISTS calculations (
                id TEXT PRIMARY KEY,
                component_id TEXT,
                calc_type TEXT, -- equation, formula, algorithm, transformation
                expression TEXT,
                variables TEXT, -- JSON array
                constants TEXT, -- JSON array
                domain TEXT, -- mathematical domain
                verified BOOLEAN DEFAULT 0,
                verification_method TEXT,
                FOREIGN KEY (component_id) REFERENCES components(id)
            )
        """)
        # Verification results table
        cursor.execute("""
            CREATE TABLE IF NOT EXISTS verifications (
                id TEXT PRIMARY KEY,
                component_id TEXT,
                verification_type TEXT,
                passed BOOLEAN,
                evidence TEXT, -- JSON
                confidence REAL,
                message TEXT,
                created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
                FOREIGN KEY (component_id) REFERENCES components(id)
            )
        """)
        # Type signatures table (for type checking)
        cursor.execute("""
            CREATE TABLE IF NOT EXISTS type_signatures (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                component_id TEXT,
                signature TEXT,
                parameters TEXT, -- JSON
                return_type TEXT,
                constraints TEXT, -- JSON (type constraints)
                verified BOOLEAN DEFAULT 0,
                FOREIGN KEY (component_id) REFERENCES components(id)
            )
        """)
        # Invariants table (loop/function invariants)
        cursor.execute("""
            CREATE TABLE IF NOT EXISTS invariants (
                id TEXT PRIMARY KEY,
                component_id TEXT,
                invariant_type TEXT, -- loop, function, class, module
                condition TEXT,
                location TEXT, -- where in code (line number, scope)
                holds BOOLEAN,
                proof_sketch TEXT,
                FOREIGN KEY (component_id) REFERENCES components(id)
            )
        """)
        # Symbolic expressions table
        cursor.execute("""
            CREATE TABLE IF NOT EXISTS symbolic_expressions (
                id TEXT PRIMARY KEY,
                component_id TEXT,
                expression TEXT,
                simplified TEXT,
                domain TEXT,
                properties TEXT, -- JSON (commutative, associative, etc.)
                equivalences TEXT, -- JSON (equivalent forms)
                FOREIGN KEY (component_id) REFERENCES components(id)
            )
        """)
        # Proofs table
        cursor.execute("""
            CREATE TABLE IF NOT EXISTS proofs (
                id TEXT PRIMARY KEY,
                component_id TEXT,
                theorem TEXT,
                proof_type TEXT, -- induction, contradiction, construction, etc.
                steps TEXT, -- JSON array of proof steps
                verified BOOLEAN DEFAULT 0,
                verifier TEXT, -- method used to verify
                FOREIGN KEY (component_id) REFERENCES components(id)
            )
        """)
        conn.commit()
        conn.close()
# ========== CALCULATION EXTRACTION ==========
def extract_calculations(self, component_id: str, file_path: str,
code: str) -> List[Calculation]:
"""Extract mechanical calculations from code."""
calculations = []
if file_path.endswith('.py'):
calculations.extend(self._extract_python_calculations(component_id, code))
return calculations
def _extract_python_calculations(self, component_id: str, code: str) -> List[Calculation]:
"""Extract calculations from Python code using AST."""
calculations = []
try:
tree = ast.parse(code)
# Extract from function bodies
for node in ast.walk(tree):
if isinstance(node, ast.FunctionDef):
calcs = self._analyze_function_calculations(component_id, node)
calculations.extend(calcs)
except SyntaxError:
pass
return calculations
    def _analyze_function_calculations(self, component_id: str,
                                      func_node: ast.FunctionDef) -> List[Calculation]:
        """Analyze calculations within a function.

        Scans returns and assignments for mathematical expressions.
        NOTE(review): ast.walk also descends into nested functions, and a
        chained assignment (a = b = expr) appends one Calculation per target
        with the same calc_id — the DB upsert dedupes them later.
        """
        calculations = []
        for node in ast.walk(func_node):
            # Look for return statements with math operations
            if isinstance(node, ast.Return) and node.value:
                calc = self._extract_from_expression(component_id, node.value, func_node.name)
                if calc:
                    calculations.append(calc)
            # Look for assignments with calculations
            elif isinstance(node, ast.Assign):
                for target in node.targets:
                    calc = self._extract_from_expression(
                        component_id, node.value,
                        self._get_name(target)
                    )
                    if calc:
                        calculations.append(calc)
        return calculations
    def _extract_from_expression(self, component_id: str, expr_node: ast.AST,
                                context: str) -> Optional[Calculation]:
        """Extract calculation from an AST expression.

        Returns an (unverified) Calculation when `expr_node` is a mathematical
        operation, else None.  The calc_id is a stable md5-prefix hash of
        component_id + unparsed expression.
        NOTE(review): `context` (enclosing function/target name) is currently
        unused — confirm whether it should be stored on the Calculation.
        """
        # Check if it's a mathematical operation
        if self._is_math_operation(expr_node):
            expression = ast.unparse(expr_node)
            variables = self._extract_variables(expr_node)
            constants = self._extract_constants(expr_node)
            calc_id = hashlib.md5(
                f"{component_id}:{expression}".encode()
            ).hexdigest()[:16]
            calc_type = self._classify_calculation(expr_node)
            domain = self._infer_domain(expr_node, variables)
            return Calculation(
                calc_id=calc_id,
                component_id=component_id,
                calc_type=calc_type,
                expression=expression,
                variables=variables,
                constants=constants,
                domain=domain,
                verified=False,
                verification_method=None
            )
        return None
def _is_math_operation(self, node: ast.AST) -> bool:
"""Check if node represents a mathematical operation."""
math_ops = (ast.Add, ast.Sub, ast.Mult, ast.Div, ast.Mod,
ast.Pow, ast.FloorDiv, ast.MatMult)
if isinstance(node, ast.BinOp):
return isinstance(node.op, math_ops)
# Check for function calls to math functions
if isinstance(node, ast.Call):
if isinstance(node.func, ast.Name):
math_funcs = {'sqrt', 'sin', 'cos', 'tan', 'exp', 'log',
'abs', 'pow', 'sum', 'max', 'min'}
return node.func.id in math_funcs
elif isinstance(node.func, ast.Attribute):
# math.sqrt, np.sum, etc.
return True
return False
def _extract_variables(self, node: ast.AST) -> List[str]:
"""Extract variable names from expression."""
variables = []
for child in ast.walk(node):
if isinstance(child, ast.Name):
# Exclude common constants
if child.id not in {'True', 'False', 'None'}:
variables.append(child.id)
return list(set(variables))
def _extract_constants(self, node: ast.AST) -> List[str]:
"""Extract constants from expression."""
constants = []
for child in ast.walk(node):
if isinstance(child, ast.Constant):
constants.append(str(child.value))
return constants
def _classify_calculation(self, node: ast.AST) -> str:
"""Classify the type of calculation."""
if isinstance(node, ast.BinOp):
if isinstance(node.op, ast.Pow):
return "equation"
elif isinstance(node.op, (ast.Mult, ast.Div)):
return "formula"
else:
return "arithmetic"
elif isinstance(node, ast.Call):
return "transformation"
return "expression"
def _infer_domain(self, node: ast.AST, variables: List[str]) -> str:
"""Infer mathematical domain from expression."""
# Simple heuristics
if any('matrix' in v.lower() or 'tensor' in v.lower() for v in variables):
return "linear_algebra"
for child in ast.walk(node):
if isinstance(child, ast.Call) and isinstance(child.func, ast.Name):
func_name = child.func.id
if func_name in {'sin', 'cos', 'tan'}:
return "trigonometry"
elif func_name in {'exp', 'log'}:
return "analysis"
elif func_name in {'sqrt', 'pow'}:
return "algebra"
return "arithmetic"
def _get_name(self, node: ast.AST) -> str:
"""Get variable name from AST node."""
if isinstance(node, ast.Name):
return node.id
elif isinstance(node, ast.Attribute):
return node.attr
return "unknown"
# ========== VERIFICATION ==========
def verify_type_consistency(self, component_id: str, code: str) -> VerificationResult:
"""Verify type consistency in code."""
try:
tree = ast.parse(code)
type_errors = []
# Check function signatures
for node in ast.walk(tree):
if isinstance(node, ast.FunctionDef):
if node.returns is None and node.name not in {'__init__'}:
type_errors.append(f"Missing return type: {node.name}")
passed = len(type_errors) == 0
return VerificationResult(
verification_type=VerificationType.TYPE_CHECKING,
component_id=component_id,
passed=passed,
evidence={'errors': type_errors, 'total_functions': len([n for n in ast.walk(tree) if isinstance(n, ast.FunctionDef)])},
confidence=0.7 if passed else 0.3,
message=f"Found {len(type_errors)} type issues" if not passed else "Type consistency verified"
)
except:
return VerificationResult(
verification_type=VerificationType.TYPE_CHECKING,
component_id=component_id,
passed=False,
evidence={'error': 'Parse error'},
confidence=0.0,
message="Could not parse code for type checking"
)
def extract_invariants(self, component_id: str, code: str) -> List[Dict]:
"""Extract loop and function invariants from code."""
invariants = []
try:
tree = ast.parse(code)
# Look for assert statements (explicit invariants)
for node in ast.walk(tree):
if isinstance(node, ast.Assert):
inv_id = hashlib.md5(
f"{component_id}:{ast.unparse(node.test)}".encode()
).hexdigest()[:16]
invariants.append({
'id': inv_id,
'component_id': component_id,
'invariant_type': 'assertion',
'condition': ast.unparse(node.test),
'location': f"line {node.lineno}",
'holds': True, # Assume true if assertion exists
'proof_sketch': 'Explicit assertion in code'
})
# Look for loop invariants (comments with "invariant:")
elif isinstance(node, (ast.For, ast.While)):
# Check if there's a comment before the loop
# (This is simplified - real implementation would parse comments)
pass
except:
pass
return invariants
def verify_symbolic_equivalence(self, expr1: str, expr2: str) -> bool:
"""Verify if two symbolic expressions are equivalent."""
# This is a placeholder - real implementation would use sympy
# For now, just check string equality after normalization
normalized1 = self._normalize_expression(expr1)
normalized2 = self._normalize_expression(expr2)
return normalized1 == normalized2
def _normalize_expression(self, expr: str) -> str:
"""Normalize expression for comparison."""
# Remove whitespace, sort commutative operations, etc.
expr = expr.replace(' ', '')
# This is simplified - real implementation would parse and normalize properly
return expr
# ========== PROPERTY-BASED TESTING ==========
def generate_property_tests(self, component_id: str, func_node: ast.FunctionDef) -> List[str]:
"""Generate property-based tests for a function."""
properties = []
# Infer properties from function signature
if func_node.name.startswith('sort'):
properties.append("output is sorted")
properties.append("output has same length as input")
elif func_node.name.startswith('reverse'):
properties.append("reversing twice gives original")
elif 'inverse' in func_node.name.lower():
properties.append("f(f_inv(x)) == x")
return properties
# ========== DATABASE OPERATIONS ==========
def save_calculation(self, calc: Calculation):
"""Save calculation to database."""
conn = sqlite3.connect(self.db_path)
cursor = conn.cursor()
cursor.execute("""
INSERT OR REPLACE INTO calculations
(id, component_id, calc_type, expression, variables, constants, domain, verified, verification_method)
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)
""", (
calc.calc_id,
calc.component_id,
calc.calc_type,
calc.expression,
json.dumps(calc.variables),
json.dumps(calc.constants),
calc.domain,
calc.verified,
calc.verification_method
))
conn.commit()
conn.close()
    def save_verification(self, result: VerificationResult):
        """Save verification result to database.

        The row id is a stable md5-prefix of component_id + verification
        type, so re-running the same check overwrites the previous result.
        NOTE(review): the connection is not closed if execute/commit raises —
        consider a try/finally, as elsewhere in this file.
        """
        conn = sqlite3.connect(self.db_path)
        cursor = conn.cursor()
        result_id = hashlib.md5(
            f"{result.component_id}:{result.verification_type.value}".encode()
        ).hexdigest()[:16]
        cursor.execute("""
            INSERT OR REPLACE INTO verifications
            (id, component_id, verification_type, passed, evidence, confidence, message)
            VALUES (?, ?, ?, ?, ?, ?, ?)
        """, (
            result_id,
            result.component_id,
            result.verification_type.value,
            result.passed,
            json.dumps(result.evidence),
            result.confidence,
            result.message
        ))
        conn.commit()
        conn.close()
def save_invariants(self, invariants: List[Dict]):
"""Save invariants to database."""
conn = sqlite3.connect(self.db_path)
cursor = conn.cursor()
for inv in invariants:
cursor.execute("""
INSERT OR REPLACE INTO invariants
(id, component_id, invariant_type, condition, location, holds, proof_sketch)
VALUES (?, ?, ?, ?, ?, ?, ?)
""", (
inv['id'],
inv['component_id'],
inv['invariant_type'],
inv['condition'],
inv['location'],
inv['holds'],
inv['proof_sketch']
))
conn.commit()
conn.close()
# ========== ANALYSIS & REPORTING ==========
    def analyze_component(self, component_id: str, file_path: str, code: str):
        """Full verification analysis of a component.

        Runs, in order: calculation extraction (persisted), type-consistency
        verification (persisted), and invariant extraction (persisted only
        when non-empty).  Progress is printed to stdout.
        """
        print(f"  🔍 Analyzing {component_id}...")
        # Extract calculations
        calculations = self.extract_calculations(component_id, file_path, code)
        for calc in calculations:
            self.save_calculation(calc)
        print(f"    ✅ Found {len(calculations)} calculations")
        # Verify type consistency
        type_result = self.verify_type_consistency(component_id, code)
        self.save_verification(type_result)
        # NOTE(review): the pass-branch glyph is an empty string — it looks
        # like a '✅' was lost in transit; confirm the intended output.
        print(f"    {'' if type_result.passed else '⚠️ '} Type checking: {type_result.message}")
        # Extract invariants
        invariants = self.extract_invariants(component_id, code)
        if invariants:
            self.save_invariants(invariants)
            print(f"    ✅ Found {len(invariants)} invariants")
    def get_verification_summary(self) -> Dict:
        """Get summary of all verifications.

        Returns:
            dict with 'calculations_by_domain' (domain -> count),
            'verifications' (list of {type, count, pass_rate}), and
            'valid_invariants' (count of invariants with holds = 1).
        """
        conn = sqlite3.connect(self.db_path)
        cursor = conn.cursor()
        summary = {}
        # Count calculations by domain
        cursor.execute("""
            SELECT domain, COUNT(*)
            FROM calculations
            GROUP BY domain
        """)
        summary['calculations_by_domain'] = dict(cursor.fetchall())
        # Count verifications by type
        cursor.execute("""
            SELECT verification_type, COUNT(*),
                   AVG(CASE WHEN passed THEN 1 ELSE 0 END) as pass_rate
            FROM verifications
            GROUP BY verification_type
        """)
        summary['verifications'] = [
            {'type': row[0], 'count': row[1], 'pass_rate': row[2]}
            for row in cursor.fetchall()
        ]
        # Count invariants
        cursor.execute("SELECT COUNT(*) FROM invariants WHERE holds = 1")
        summary['valid_invariants'] = cursor.fetchone()[0]
        conn.close()
        return summary
def main():
    """CLI interface.

    Modes (checked in order):
      --summary               print aggregate verification counts
      --analyze ID --file F   run full verification of F as component ID
    Note: if neither mode is selected (or --analyze lacks --file), the
    script exits silently after opening the framework.
    """
    import argparse
    parser = argparse.ArgumentParser(description='Codex Verification Framework')
    parser.add_argument('--codex', default='~/blackroad-codex', help='Codex path')
    parser.add_argument('--analyze', help='Analyze component by ID')
    parser.add_argument('--summary', action='store_true', help='Show verification summary')
    parser.add_argument('--file', help='File path for analysis')
    args = parser.parse_args()
    # Raises FileNotFoundError if the codex database does not exist yet.
    framework = CodexVerificationFramework(args.codex)
    if args.summary:
        summary = framework.get_verification_summary()
        print("\n📊 VERIFICATION SUMMARY\n")
        print("Calculations by domain:")
        for domain, count in summary['calculations_by_domain'].items():
            print(f"  {domain}: {count}")
        print("\nVerifications:")
        for v in summary['verifications']:
            print(f"  {v['type']}: {v['count']} checks ({v['pass_rate']*100:.1f}% pass)")
        print(f"\nValid invariants: {summary['valid_invariants']}")
    elif args.analyze and args.file:
        with open(args.file, 'r') as f:
            code = f.read()
        framework.analyze_component(args.analyze, args.file, code)
# Allow running this verifier directly as a CLI script.
if __name__ == '__main__':
    main()

View File

@@ -0,0 +1,779 @@
#!/usr/bin/env python3
"""
BlackRoad Fleet Command Center
Unified interface for Pi cluster management
"""
import http.server
import socketserver
import json
import subprocess
import urllib.request
import urllib.error
from urllib.parse import parse_qs, urlparse
import os
import threading
import time
# TCP port the command-center HTTP server listens on.
PORT = 9000
# Pi fleet configuration
# Keys double as SSH hostnames (see execute_ssh_command) and Ollama hosts;
# `color` is only used for UI accents.
FLEET = {
    "aria": {"ip": "192.168.4.82", "role": "Web Services", "color": "#FF9D00"},
    "lucidia": {"ip": "192.168.4.81", "role": "NATS Brain", "color": "#FF0066"},
    "alice": {"ip": "192.168.4.49", "role": "K3s Cluster", "color": "#D600AA"},
    "octavia": {"ip": "192.168.4.38", "role": "Hailo-8 NPU", "color": "#7700FF"},
    "cecilia": {"ip": "192.168.4.89", "role": "Hailo-8 NPU", "color": "#0066FF"}
}
def execute_ssh_command(host, command):
    """Execute command on remote host via SSH

    Returns {"success": bool, "stdout": str, "stderr": str} when ssh ran,
    or {"success": False, "error": str} when it could not (timeout, etc.).
    NOTE(review): `command` is handed to the remote shell verbatim — callers
    must never feed it untrusted input (the HTTP handlers below do exactly
    that; confirm this server is LAN-only).
    """
    try:
        result = subprocess.run(
            ["ssh", "-o", "ConnectTimeout=5", host, command],
            capture_output=True,
            text=True,
            timeout=30
        )
        return {
            "success": result.returncode == 0,
            "stdout": result.stdout,
            "stderr": result.stderr
        }
    except Exception as e:
        # Covers subprocess.TimeoutExpired and OSError (ssh binary missing, …).
        return {
            "success": False,
            "error": str(e)
        }
def get_pi_metrics(host):
    """Get comprehensive metrics from a Pi"""
    # Metric name -> shell command run on the remote host over SSH.
    commands = {
        "uptime": "uptime",
        "load": "cat /proc/loadavg",
        "memory": "free -m",
        "disk": "df -h /",
        "cpu_temp": "vcgencmd measure_temp 2>/dev/null || echo 'N/A'",
        "processes": "ps aux --sort=-%cpu | head -6",
        "docker": "docker ps --format '{{.Names}}' 2>/dev/null | wc -l",
        "ollama": "pgrep ollama >/dev/null && echo 'running' || echo 'stopped'",
        "services": "systemctl list-units --type=service --state=running | wc -l"
    }
    metrics = {}
    for metric_name, remote_cmd in commands.items():
        outcome = execute_ssh_command(host, remote_cmd)
        # "error" is the sentinel consumers (do_GET) use to flag a dead host.
        metrics[metric_name] = outcome["stdout"].strip() if outcome["success"] else "error"
    return metrics
def deploy_to_pi(host, service):
    """Deploy a service to a Pi"""
    # Named service -> ordered list of shell commands run on the host.
    deploy_commands = {
        "fleet-monitor": [
            "pkill -f blackroad-fleet-monitor || true",
            "nohup python3 ~/blackroad-fleet-monitor.py > /tmp/fleet-monitor.log 2>&1 &"
        ],
        "llm-api": [
            "pkill -f blackroad-llm-api || true",
            "nohup python3 ~/blackroad-llm-api.py > /tmp/llm-api.log 2>&1 &"
        ],
        "restart-ollama": ["sudo systemctl restart ollama"],
        "update-system": ["sudo apt update && sudo apt upgrade -y"],
        "reboot": ["sudo reboot"]
    }
    steps = deploy_commands.get(service)
    if steps is None:
        return {"success": False, "error": "Unknown service"}
    # Run every step even if an earlier one fails, mirroring per-step results.
    results = [execute_ssh_command(host, step) for step in steps]
    return {
        "success": all(r["success"] for r in results),
        "results": results
    }
class CommandCenterHandler(http.server.SimpleHTTPRequestHandler):
    def do_GET(self):
        """Route GET requests: fleet status, LLM health, remote exec, and the UI.

        NOTE(review): /api/exec/<host>?cmd=... runs arbitrary shell commands
        over SSH with no authentication, and responses carry wildcard CORS —
        confirm this server only ever binds to a trusted LAN.
        """
        parsed_path = urlparse(self.path)
        # API: Fleet status
        if parsed_path.path == '/api/fleet':
            fleet_status = {}
            for name, info in FLEET.items():
                metrics = get_pi_metrics(name)
                fleet_status[name] = {
                    **info,
                    "metrics": metrics,
                    # "error" is the sentinel get_pi_metrics stores when SSH
                    # fails, so the uptime probe doubles as an online check.
                    "online": metrics.get("uptime") != "error"
                }
            self.send_json_response(fleet_status)
        # API: LLM Cluster health
        elif parsed_path.path == '/api/llm/health':
            health = {}
            for name in FLEET.keys():
                try:
                    # Ollama's tags endpoint; any response counts as healthy.
                    url = f"http://{name}:11434/api/tags"
                    req = urllib.request.Request(url)
                    with urllib.request.urlopen(req, timeout=2) as response:
                        health[name] = True
                except:
                    health[name] = False
            self.send_json_response(health)
        # API: Execute command
        elif parsed_path.path.startswith('/api/exec/'):
            # Path shape: /api/exec/<host>?cmd=<command>
            parts = parsed_path.path.split('/')
            if len(parts) >= 4:
                host = parts[3]
                query = parse_qs(parsed_path.query)
                cmd = query.get('cmd', [''])[0]
                if host in FLEET and cmd:
                    result = execute_ssh_command(host, cmd)
                    self.send_json_response(result)
                else:
                    self.send_error(400, "Invalid host or command")
            else:
                self.send_error(400, "Invalid path")
        # Main UI
        elif parsed_path.path == '/' or parsed_path.path == '/index.html':
            self.send_command_center_ui()
        else:
            self.send_error(404, "Not found")
def do_POST(self):
content_length = int(self.headers['Content-Length'])
post_data = self.rfile.read(content_length)
# API: Deploy service
if self.path == '/api/deploy':
try:
data = json.loads(post_data.decode('utf-8'))
host = data.get('host')
service = data.get('service')
if host in FLEET:
result = deploy_to_pi(host, service)
self.send_json_response(result)
else:
self.send_json_response({"success": False, "error": "Invalid host"})
except Exception as e:
self.send_json_response({"success": False, "error": str(e)})
# API: Execute command
elif self.path == '/api/command':
try:
data = json.loads(post_data.decode('utf-8'))
host = data.get('host')
command = data.get('command')
if host in FLEET and command:
result = execute_ssh_command(host, command)
self.send_json_response(result)
else:
self.send_json_response({"success": False, "error": "Invalid request"})
except Exception as e:
self.send_json_response({"success": False, "error": str(e)})
else:
self.send_error(404, "Not found")
    def send_json_response(self, data, status=200):
        """Serialize `data` as pretty-printed JSON with permissive CORS headers."""
        self.send_response(status)
        self.send_header('Content-Type', 'application/json')
        # NOTE(review): wildcard CORS exposes the exec/deploy APIs to any
        # web page the operator visits — confirm intentional.
        self.send_header('Access-Control-Allow-Origin', '*')
        self.end_headers()
        self.wfile.write(json.dumps(data, indent=2).encode('utf-8'))
def send_command_center_ui(self):
html = """<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>BlackRoad Fleet Command Center</title>
<link href="https://fonts.googleapis.com/css2?family=JetBrains+Mono:wght@400;500;600;700&display=swap" rel="stylesheet">
<style>
* { margin: 0; padding: 0; box-sizing: border-box; }
body {
font-family: 'JetBrains Mono', monospace;
background: #000;
color: #fff;
overflow: hidden;
}
.header {
background: linear-gradient(90deg, #FF9D00, #FF0066, #7700FF, #0066FF);
padding: 13px 21px;
display: flex;
justify-content: space-between;
align-items: center;
box-shadow: 0 4px 21px rgba(0, 102, 255, 0.3);
}
.title {
font-size: 1.5rem;
font-weight: 700;
display: flex;
align-items: center;
gap: 8px;
}
.status-indicator {
width: 10px;
height: 10px;
border-radius: 50%;
background: #00ff00;
box-shadow: 0 0 10px #00ff00;
animation: pulse 2s infinite;
}
@keyframes pulse {
0%, 100% { opacity: 1; }
50% { opacity: 0.5; }
}
.header-stats {
display: flex;
gap: 21px;
font-size: 0.9rem;
}
.stat {
display: flex;
flex-direction: column;
align-items: center;
}
.stat-value {
font-size: 1.5rem;
font-weight: 700;
}
.stat-label {
font-size: 0.7rem;
opacity: 0.7;
}
.main-grid {
display: grid;
grid-template-columns: 1fr 1fr 1fr;
grid-template-rows: 1fr 1fr;
gap: 8px;
height: calc(100vh - 60px);
padding: 8px;
}
.panel {
background: rgba(255, 255, 255, 0.02);
border: 1px solid rgba(255, 255, 255, 0.1);
border-radius: 13px;
padding: 13px;
overflow-y: auto;
backdrop-filter: blur(13px);
}
.panel-title {
font-size: 1rem;
font-weight: 600;
margin-bottom: 13px;
padding-bottom: 8px;
border-bottom: 1px solid rgba(255, 255, 255, 0.1);
display: flex;
justify-content: space-between;
align-items: center;
}
.pi-card {
background: rgba(0, 0, 0, 0.3);
border-radius: 8px;
padding: 13px;
margin-bottom: 8px;
border-left: 4px solid;
cursor: pointer;
transition: all 0.2s;
}
.pi-card:hover {
transform: translateX(5px);
background: rgba(0, 0, 0, 0.5);
}
.pi-card.offline {
opacity: 0.5;
border-left-color: #666 !important;
}
.pi-header {
display: flex;
justify-content: space-between;
align-items: center;
margin-bottom: 8px;
}
.pi-name {
font-weight: 700;
font-size: 1.1rem;
}
.pi-role {
font-size: 0.8rem;
opacity: 0.7;
}
.pi-metrics {
display: grid;
grid-template-columns: 1fr 1fr;
gap: 5px;
font-size: 0.75rem;
margin-top: 8px;
}
.metric {
display: flex;
justify-content: space-between;
}
.metric-label {
opacity: 0.7;
}
.metric-value {
font-weight: 600;
}
.btn {
padding: 5px 13px;
background: linear-gradient(90deg, #7700FF, #0066FF);
border: none;
border-radius: 5px;
color: white;
font-family: 'JetBrains Mono', monospace;
font-size: 0.8rem;
cursor: pointer;
transition: all 0.2s;
}
.btn:hover {
transform: scale(1.05);
}
.btn-small {
padding: 3px 8px;
font-size: 0.7rem;
}
.terminal {
background: #000;
border-radius: 8px;
padding: 13px;
font-family: 'JetBrains Mono', monospace;
font-size: 0.8rem;
height: 200px;
overflow-y: auto;
}
.terminal-line {
margin-bottom: 3px;
word-wrap: break-word;
}
.terminal-input {
display: flex;
gap: 8px;
margin-top: 8px;
}
input, select {
background: rgba(255, 255, 255, 0.05);
border: 1px solid rgba(255, 255, 255, 0.2);
border-radius: 5px;
padding: 8px;
color: #fff;
font-family: 'JetBrains Mono', monospace;
font-size: 0.8rem;
}
input {
flex: 1;
}
.quick-actions {
display: grid;
grid-template-columns: 1fr 1fr;
gap: 8px;
}
.action-btn {
padding: 8px;
font-size: 0.75rem;
}
.large-panel {
grid-column: span 2;
}
.llm-status {
display: flex;
gap: 8px;
margin-bottom: 8px;
}
.llm-node {
flex: 1;
text-align: center;
padding: 8px;
background: rgba(0, 0, 0, 0.3);
border-radius: 5px;
font-size: 0.75rem;
}
.llm-node.online {
border: 2px solid #00ff00;
}
.llm-node.offline {
border: 2px solid #ff0000;
opacity: 0.5;
}
.logs {
font-size: 0.7rem;
line-height: 1.4;
}
</style>
</head>
<body>
<div class="header">
<div class="title">
<span class="status-indicator"></span>
🎮 BLACKROAD FLEET COMMAND CENTER
</div>
<div class="header-stats">
<div class="stat">
<div class="stat-value" id="onlineCount">-</div>
<div class="stat-label">ONLINE</div>
</div>
<div class="stat">
<div class="stat-value" id="cpuLoad">-</div>
<div class="stat-label">AVG LOAD</div>
</div>
<div class="stat">
<div class="stat-value" id="llmNodes">-</div>
<div class="stat-label">LLM NODES</div>
</div>
</div>
</div>
<div class="main-grid">
<!-- Fleet Status -->
<div class="panel">
<div class="panel-title">
<span>📊 FLEET STATUS</span>
<button class="btn btn-small" onclick="refreshFleet()">🔄</button>
</div>
<div id="fleetList"></div>
</div>
<!-- LLM Cluster -->
<div class="panel">
<div class="panel-title">
<span>🤖 LLM CLUSTER</span>
<button class="btn btn-small" onclick="refreshLLM()">🔄</button>
</div>
<div class="llm-status" id="llmStatus"></div>
<div style="margin-top: 13px;">
<textarea id="llmPrompt" placeholder="Send prompt to cluster..." style="width: 100%; height: 80px; resize: vertical;"></textarea>
<button class="btn" onclick="sendLLMPrompt()" style="width: 100%; margin-top: 8px;">🚀 Send to Cluster</button>
</div>
<div id="llmResponse" style="margin-top: 13px; font-size: 0.75rem;"></div>
</div>
<!-- Quick Actions -->
<div class="panel">
<div class="panel-title">⚡ QUICK ACTIONS</div>
<div class="quick-actions">
<button class="btn action-btn" onclick="deployAll('fleet-monitor')">📊 Deploy Monitor</button>
<button class="btn action-btn" onclick="deployAll('llm-api')">🤖 Deploy LLM API</button>
<button class="btn action-btn" onclick="restartOllama()">🔄 Restart Ollama</button>
<button class="btn action-btn" onclick="checkHealth()">💚 Health Check</button>
<button class="btn action-btn" onclick="updateAll()">⬆️ Update All</button>
<button class="btn action-btn" onclick="viewLogs()">📋 View Logs</button>
</div>
</div>
<!-- Terminal -->
<div class="panel large-panel">
<div class="panel-title">
<span>💻 REMOTE TERMINAL</span>
<button class="btn btn-small" onclick="clearTerminal()">Clear</button>
</div>
<div class="terminal" id="terminal"></div>
<div class="terminal-input">
<select id="terminalHost">
<option value="aria">aria</option>
<option value="lucidia">lucidia</option>
<option value="alice">alice</option>
<option value="octavia">octavia</option>
<option value="cecilia">cecilia</option>
</select>
<input type="text" id="terminalCmd" placeholder="Enter command..." />
<button class="btn" onclick="executeCommand()">▶</button>
</div>
</div>
<!-- System Logs -->
<div class="panel">
<div class="panel-title">📋 SYSTEM LOGS</div>
<div class="logs" id="systemLogs"></div>
</div>
</div>
<script>
let fleetData = {};
async function refreshFleet() {
try {
const res = await fetch('/api/fleet');
fleetData = await res.json();
const fleetList = document.getElementById('fleetList');
fleetList.innerHTML = '';
let onlineCount = 0;
let totalLoad = 0;
for (const [name, data] of Object.entries(fleetData)) {
if (data.online) {
onlineCount++;
const load = parseFloat(data.metrics.load?.split(' ')[0] || 0);
totalLoad += load;
}
const card = document.createElement('div');
card.className = 'pi-card' + (data.online ? '' : ' offline');
card.style.borderLeftColor = data.color;
card.innerHTML = `
<div class="pi-header">
<div>
<div class="pi-name">${name}</div>
<div class="pi-role">${data.role}</div>
</div>
<button class="btn btn-small" onclick="deployTo('${name}')">Deploy</button>
</div>
<div class="pi-metrics">
<div class="metric">
<span class="metric-label">Load:</span>
<span class="metric-value">${data.metrics.load?.split(' ')[0] || 'N/A'}</span>
</div>
<div class="metric">
<span class="metric-label">Ollama:</span>
<span class="metric-value">${data.metrics.ollama || 'N/A'}</span>
</div>
<div class="metric">
<span class="metric-label">Temp:</span>
<span class="metric-value">${data.metrics.cpu_temp || 'N/A'}</span>
</div>
<div class="metric">
<span class="metric-label">Docker:</span>
<span class="metric-value">${data.metrics.docker || '0'}</span>
</div>
</div>
`;
fleetList.appendChild(card);
}
document.getElementById('onlineCount').textContent = onlineCount + '/5';
document.getElementById('cpuLoad').textContent = (totalLoad / onlineCount).toFixed(2);
} catch (e) {
console.error('Failed to refresh fleet:', e);
}
}
async function refreshLLM() {
try {
const res = await fetch('/api/llm/health');
const health = await res.json();
const llmStatus = document.getElementById('llmStatus');
llmStatus.innerHTML = '';
let onlineNodes = 0;
for (const [name, online] of Object.entries(health)) {
if (online) onlineNodes++;
const node = document.createElement('div');
node.className = 'llm-node ' + (online ? 'online' : 'offline');
node.innerHTML = `<strong>${name}</strong><br>${online ? '' : ''}`;
llmStatus.appendChild(node);
}
document.getElementById('llmNodes').textContent = onlineNodes + '/4';
} catch (e) {
console.error('Failed to refresh LLM:', e);
}
}
async function executeCommand() {
const host = document.getElementById('terminalHost').value;
const cmd = document.getElementById('terminalCmd').value;
if (!cmd) return;
const terminal = document.getElementById('terminal');
const line = document.createElement('div');
line.className = 'terminal-line';
line.style.color = '#00ff00';
line.textContent = `[${host}]$ ${cmd}`;
terminal.appendChild(line);
try {
const res = await fetch('/api/command', {
method: 'POST',
headers: {'Content-Type': 'application/json'},
body: JSON.stringify({host, command: cmd})
});
const data = await res.json();
const output = document.createElement('div');
output.className = 'terminal-line';
output.textContent = data.success ? data.stdout : ('Error: ' + (data.stderr || data.error));
output.style.color = data.success ? '#fff' : '#ff0000';
terminal.appendChild(output);
} catch (e) {
const error = document.createElement('div');
error.className = 'terminal-line';
error.style.color = '#ff0000';
error.textContent = 'Error: ' + e.message;
terminal.appendChild(error);
}
terminal.scrollTop = terminal.scrollHeight;
document.getElementById('terminalCmd').value = '';
}
function clearTerminal() {
document.getElementById('terminal').innerHTML = '';
}
function deployTo(host) {
alert(`Deploy menu for ${host} (coming soon)`);
}
function deployAll(service) {
alert(`Deploying ${service} to all nodes...`);
}
function restartOllama() {
alert('Restarting Ollama on all nodes...');
}
function checkHealth() {
refreshFleet();
refreshLLM();
}
function updateAll() {
alert('Updating all systems...');
}
function viewLogs() {
alert('Log viewer (coming soon)');
}
async function sendLLMPrompt() {
const prompt = document.getElementById('llmPrompt').value;
if (!prompt) return;
const responseDiv = document.getElementById('llmResponse');
responseDiv.innerHTML = '⏳ Processing...';
try {
const res = await fetch('http://localhost:8889/api/generate', {
method: 'POST',
headers: {'Content-Type': 'application/json'},
body: JSON.stringify({prompt, model: 'llama3:8b'})
});
const data = await res.json();
if (data.success) {
responseDiv.innerHTML = `
<div style="color: #0066FF; margin-bottom: 5px;">
✓ ${data.node.replace('http://', '')} (${data.elapsed_time.toFixed(2)}s)
</div>
<div style="color: #fff;">${data.response}</div>
`;
} else {
responseDiv.innerHTML = `<div style="color: #ff0000;">Error: ${data.error}</div>`;
}
} catch (e) {
responseDiv.innerHTML = `<div style="color: #ff0000;">Error: ${e.message}</div>`;
}
}
// Initial load
refreshFleet();
refreshLLM();
// Auto-refresh every 10 seconds
setInterval(() => {
refreshFleet();
refreshLLM();
}, 10000);
// Enter to execute
document.getElementById('terminalCmd').addEventListener('keypress', (e) => {
if (e.key === 'Enter') executeCommand();
});
// Ctrl+Enter to send LLM prompt
document.getElementById('llmPrompt').addEventListener('keydown', (e) => {
if (e.key === 'Enter' && e.ctrlKey) sendLLMPrompt();
});
</script>
</body>
</html>"""
self.send_response(200)
self.send_header('Content-Type', 'text/html')
self.end_headers()
self.wfile.write(html.encode('utf-8'))
# Entry point: start the fleet command-center HTTP server.
# PORT and CommandCenterHandler are defined earlier in this file.
if __name__ == '__main__':
    # Startup banner
    print("🎮 BlackRoad Fleet Command Center")
    print("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━")
    print("")
    print(f"Starting server on port {PORT}...")
    print("")
    print(f"Access: http://localhost:{PORT}")
    print("")
    print("Features:")
    print("  📊 Real-time fleet monitoring")
    print("  🤖 LLM cluster control")
    print("  💻 Remote SSH terminal")
    print("  ⚡ Quick deploy actions")
    print("  📋 System logs")
    print("")
    print("Press Ctrl+C to stop")
    print("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━")
    # Serve until interrupted; the context manager closes the listening
    # socket on exit.
    with socketserver.TCPServer(("", PORT), CommandCenterHandler) as httpd:
        try:
            httpd.serve_forever()
        except KeyboardInterrupt:
            # Ctrl+C: shut down cleanly
            print("\n\n✓ Server stopped")

View File

@@ -0,0 +1,546 @@
#!/usr/bin/env python3
"""
BlackRoad OS - Command Executor (Event → State)
Pure state mutation layer. Takes events, updates state.
NO rendering. NO input. NO I/O.
This is the ONLY place state mutation is allowed.
"""
from typing import Dict, Any
from datetime import datetime
from enum import Enum
import time
# ================================
# EVENT HANDLING
# ================================
# The brainstem of the OS
# Events → State transitions
def handle_event(event: Dict[str, Any], state: Dict[str, Any]) -> Dict[str, Any]:
    """
    Central event router: translate an event into a state transition.

    This is the single entry point through which all state mutation
    flows; every handler it dispatches to takes state and returns the
    updated state.

    Args:
        event: Event dict with a 'type' string and a 'payload' dict.
        state: Current system state.

    Returns:
        The updated state dict.
    """
    # Dispatch table: event type name -> handler function.
    routes = {
        'mode_switch': handle_mode_switch,
        'key_press': handle_key_press,
        'input_submit': handle_input_submit,
        'scroll': handle_scroll,
        'quit': handle_quit,
        'clear': handle_clear,
        'command_mode': handle_command_mode,
        'agent_update': handle_agent_update,
        'system_message': handle_system_message,
    }
    event_type = event.get('type', 'unknown')
    handler = routes.get(event_type)
    if handler is None:
        # Unrecognized events are logged and otherwise ignored.
        return append_log(state, 'system', f"Unknown event: {event_type}")
    return handler(state, event.get('payload', {}))
# ================================
# MODE SWITCHING
# ================================
def handle_mode_switch(state: Dict, payload: Dict) -> Dict:
    """
    Switch active mode/tab.

    Mode determines which agents are visible and active.
    Resets scroll position to prevent out-of-bounds views.

    Args:
        payload: Expects 'mode', the target mode name (default 'chat').

    Returns:
        The state dict (mutated in place and returned).
    """
    new_mode = payload.get('mode', 'chat')
    # Update mode
    # In real implementation, would convert string to Mode enum
    # For now, keep as string for simplicity
    state['mode'] = new_mode
    # Reset scroll on mode change
    # Prevents viewing data from previous mode
    if 'cursor' in state:
        state['cursor']['scroll_offset'] = 0
    # Log the transition
    state = append_log(state, 'system', f"Switched to {new_mode} mode")
    # Mark dirty for redraw
    state['dirty'] = True
    return state
# ================================
# KEY PRESS HANDLING
# ================================
def handle_key_press(state: Dict, payload: Dict) -> Dict:
    """
    Handle raw character input.

    Backspace edits the input buffer in any mode; printable ASCII
    characters are appended to the buffer only while in command mode.

    Args:
        payload: Expects 'char', a single-character string. A missing,
            empty, or multi-character value is ignored instead of
            raising (the previous version called ord() on the default
            '' and crashed with TypeError).

    Returns:
        The updated state dict.
    """
    char = payload.get('char', '')
    # Handle backspace (DEL or BS)
    if char == '\x7f' or char == '\x08':
        if state.get('input_buffer', ''):
            state['input_buffer'] = state['input_buffer'][:-1]
            state['dirty'] = True
        return state
    # ord() requires exactly one character; ignore anything else.
    if len(char) != 1:
        return state
    # Only accept printable characters in command mode
    if state.get('command_mode', False):
        if 32 <= ord(char) <= 126:  # Printable ASCII
            state['input_buffer'] = state.get('input_buffer', '') + char
            state['dirty'] = True
    return state
# ================================
# INPUT SUBMISSION
# ================================
def handle_input_submit(state: Dict, payload: Dict) -> Dict:
    """
    Process the submitted input buffer as a command or plain text.

    Slash-prefixed input is executed as a command; any other non-empty
    text is handed to the agent layer for processing. The buffer is
    cleared and command mode is exited in every case.

    Returns:
        The updated state dict.
    """
    text = state.get('input_buffer', '').strip()
    # Always reset the buffer and drop out of command mode
    state['input_buffer'] = ''
    state['command_mode'] = False
    state['dirty'] = True
    if not text:
        return state
    # Echo the submitted input into the log
    state = append_log(state, 'command', f"$ {text}")
    # Dispatch: slash commands vs. free text for the agents
    if text.startswith('/'):
        return execute_command(state, text)
    return trigger_agent_processing(state, text)
def execute_command(state: Dict, command_text: str) -> Dict:
    """
    Execute slash commands.

    Commands are the user's way to control the system; each command has
    specific state effects.

    Args:
        command_text: The raw input including the leading '/'.

    Returns:
        The state dict (mutated in place and returned).
    """
    # Remove leading /
    cmd = command_text[1:].strip()
    tokens = cmd.split()
    if not tokens:
        return state
    cmd_name = tokens[0].lower()
    args = tokens[1:] if len(tokens) > 1 else []
    # Command routing
    if cmd_name == 'help':
        # NOTE(review): the help text omits /agent, handled below — confirm intended
        state = append_log(state, 'system', 'Available commands: /help /agents /mode /clear /status /quit')
    elif cmd_name == 'agents':
        # List all agents
        agent_list = ', '.join(state.get('agents', {}).keys())
        state = append_log(state, 'system', f"Agents: {agent_list}")
    elif cmd_name == 'mode':
        # Switch mode via command
        # NOTE(review): unlike handle_mode_switch, this does not reset
        # cursor scroll_offset — confirm whether that is intentional.
        if args:
            state['mode'] = args[0]
            state = append_log(state, 'system', f"Mode changed to {args[0]}")
        else:
            state = append_log(state, 'system', f"Current mode: {state.get('mode', 'unknown')}")
    elif cmd_name == 'clear':
        # Clear log (the confirmation entry becomes the only one left)
        state['log'] = []
        state = append_log(state, 'system', 'Log cleared')
    elif cmd_name == 'status':
        # Show system status from cached metrics
        metrics = state.get('metrics', {})
        cpu = metrics.get('cpu_percent', 0)
        mem = metrics.get('memory_percent', 0)
        state = append_log(state, 'system', f"CPU: {cpu}% Memory: {mem}%")
    elif cmd_name == 'agent':
        # Show details for a single agent
        if args:
            agent_name = args[0]
            agents = state.get('agents', {})
            if agent_name in agents:
                agent = agents[agent_name]
                status = agent.get('status', 'unknown')
                task = agent.get('task', 'none')
                state = append_log(state, 'system', f"{agent_name}: {status} - {task}")
            else:
                state = append_log(state, 'system', f"Unknown agent: {agent_name}")
        else:
            state = append_log(state, 'system', 'Usage: /agent <name>')
    elif cmd_name == 'quit':
        # Signal the main loop to stop
        state['running'] = False
        state = append_log(state, 'system', 'Shutting down...')
    else:
        state = append_log(state, 'system', f"Unknown command: {cmd_name}")
    return state
def trigger_agent_processing(state: Dict, text: str) -> Dict:
    """
    Trigger the primary agent to process free-text input.

    Simulates the agent lifecycle:
    - Agent goes from idle/active → busy
    - Records timestamp for the completion check in update_agent_lifecycle
    - Logs the action

    NOTE(review): status is stored here as the string 'busy', matching
    update_agent_lifecycle's string comparison in this module (the
    engine module uses an AgentStatus enum instead).

    Returns:
        The state dict (mutated in place and returned).
    """
    # Set primary agent (lucidia) to busy
    agents = state.get('agents', {})
    if 'lucidia' in agents:
        agents['lucidia']['status'] = 'busy'
        agents['lucidia']['task'] = f"Processing: {text[:20]}"
        agents['lucidia']['last_active'] = time.time()
        state = append_log(state, 'agent', f"lucidia → processing input")
    return state
# ================================
# SCROLLING
# ================================
def handle_scroll(state: Dict, payload: Dict) -> Dict:
    """
    Handle scroll navigation (j/k keys).

    Moves the scroll offset one line in the requested direction and
    clamps it to the range implied by the log length.

    Args:
        payload: 'direction' is 'up' or 'down' (default 'down').
            'visible_lines' optionally supplies the viewport height used
            to compute the maximum offset; defaults to the previously
            hard-coded 20, so existing callers are unaffected.

    Returns:
        The updated state dict.
    """
    direction = payload.get('direction', 'down')
    if 'cursor' not in state:
        state['cursor'] = {'scroll_offset': 0}
    offset = state['cursor']['scroll_offset']
    log_length = len(state.get('log', []))
    # One step per event
    if direction == 'down':
        offset += 1
    elif direction == 'up':
        offset -= 1
    # Clamp to valid range:
    # max scroll keeps the final viewport-full of lines reachable.
    visible_lines = payload.get('visible_lines', 20)
    max_scroll = max(0, log_length - visible_lines)
    offset = max(0, min(offset, max_scroll))
    state['cursor']['scroll_offset'] = offset
    state['dirty'] = True
    return state
# ================================
# COMMAND MODE
# ================================
def handle_command_mode(state: Dict, payload: Dict) -> Dict:
    """
    Enter or leave command mode.

    While command mode is active, keystrokes accumulate in the input
    buffer, submit executes the buffer as a command, and ESC cancels.
    """
    entering = payload.get('enter', True)
    state['command_mode'] = entering
    if entering:
        # Start from an empty buffer every time the mode is entered
        state['input_buffer'] = ''
        state = append_log(state, 'system', 'Command mode active (type command, press ESC to cancel)')
    state['dirty'] = True
    return state
# ================================
# CLEAR
# ================================
def handle_clear(state: Dict, payload: Dict) -> Dict:
    """
    Discard the input buffer and leave command mode.

    Typically bound to the ESC key.
    """
    state.update(input_buffer='', command_mode=False, dirty=True)
    return state
# ================================
# QUIT
# ================================
def handle_quit(state: Dict, payload: Dict) -> Dict:
    """
    Request system shutdown.

    Logs the signal and clears the running flag; the main loop is
    expected to notice the flag and exit cleanly.
    """
    state = append_log(state, 'system', 'Quit signal received')
    state['running'] = False
    state['dirty'] = True
    return state
# ================================
# AGENT UPDATE
# ================================
def handle_agent_update(state: Dict, payload: Dict) -> Dict:
    """
    Update an agent's status/task from an event.

    Used by the system or other agents to change agent status while
    keeping the lifecycle bookkeeping (last_active, log) consistent.

    Args:
        payload: 'agent' (name), 'status' (default 'idle'), optional 'task'.

    Returns:
        The state dict (mutated in place and returned).
    """
    agent_name = payload.get('agent', '')
    new_status = payload.get('status', 'idle')
    task = payload.get('task', '')
    agents = state.get('agents', {})
    if agent_name in agents:
        agents[agent_name]['status'] = new_status
        agents[agent_name]['last_active'] = time.time()
        if task:
            agents[agent_name]['task'] = task
        # NOTE(review): a separator glyph between name and status may
        # have been lost in transit here — confirm against the other
        # "{name} → {status}" log messages in this module.
        state = append_log(state, 'agent', f"{agent_name}{new_status}")
    state['dirty'] = True
    return state
# ================================
# SYSTEM MESSAGE
# ================================
def handle_system_message(state: Dict, payload: Dict) -> Dict:
    """
    Add a system message to the log.

    Used for internal logging, errors, and warnings.

    Args:
        payload: 'message' (text, default '') and 'level' (log level
            tag, default 'system').

    Returns:
        The state dict (mutated in place and returned).
    """
    message = payload.get('message', '')
    level = payload.get('level', 'system')
    state = append_log(state, level, message)
    state['dirty'] = True
    return state
# ================================
# AGENT LIFECYCLE
# ================================
def update_agent_lifecycle(state: Dict) -> Dict:
    """
    Update agent states based on elapsed time.

    Call this periodically from the main loop; it simulates agent work
    completion.

    Rules:
    - Busy agents return to active after work completes
    - Work completion is determined by elapsed time (> 2 seconds busy)

    Returns:
        The state dict (mutated in place and returned).
    """
    current_time = time.time()
    agents = state.get('agents', {})
    for name, data in agents.items():
        status = data.get('status', 'idle')
        last_active = data.get('last_active', 0)
        # If busy for > 2 seconds, mark as complete
        if status == 'busy':
            elapsed = current_time - last_active
            if elapsed > 2.0:
                data['status'] = 'active'
                data['task'] = 'Idle'
                state = append_log(state, 'agent', f"{name} → completed work")
                state['dirty'] = True
    return state
# ================================
# LOGGING HELPERS
# ================================
def append_log(state: Dict, level: str, message: str) -> Dict:
    """
    Append a timestamped entry to the append-only log.

    The log is bounded: only the newest 1000 entries are kept, so
    memory use cannot grow without limit.
    """
    max_entries = 1000
    log = state.setdefault('log', [])
    log.append({
        'time': datetime.now(),
        'level': level,
        'msg': message,
    })
    if len(log) > max_entries:
        # Drop the oldest entries, keeping the newest max_entries
        state['log'] = log[-max_entries:]
    return state
# ================================
# STATE VALIDATION
# ================================
def validate_state(state: Dict) -> Dict:
    """
    Fill in any missing required state keys with safe defaults.

    Call after mutations so downstream code can rely on the keys
    existing; keys already present are left untouched.
    """
    state.setdefault('mode', 'chat')
    state.setdefault('cursor', {'scroll_offset': 0})
    state.setdefault('agents', {})
    state.setdefault('log', [])
    state.setdefault('input_buffer', '')
    state.setdefault('command_mode', False)
    state.setdefault('dirty', True)
    state.setdefault('running', True)
    return state
# ================================
# EXAMPLE USAGE
# ================================
# Demo entry point: drives the executor with a scripted event sequence.
if __name__ == "__main__":
    """
    Standalone example showing command executor
    """
    from datetime import datetime
    # Create mock state (mirrors the shape produced by the engine module)
    state = {
        'mode': 'chat',
        'cursor': {'scroll_offset': 0},
        'agents': {
            'lucidia': {
                'status': 'active',
                'task': 'Memory sync',
                'color': 'purple',
                'last_active': time.time(),
            },
        },
        'log': [
            {'time': datetime.now(), 'level': 'system', 'msg': 'System initialized'},
        ],
        'input_buffer': '',
        'command_mode': False,
        'dirty': True,
        'running': True,
    }
    print("BlackRoad OS - Command Executor Demo")
    print("=" * 50)
    print()
    # Simulate event sequence: switch mode, enter command mode, type
    # "/help" one keystroke at a time, then submit it.
    events = [
        {'type': 'mode_switch', 'payload': {'mode': 'ops'}},
        {'type': 'command_mode', 'payload': {'enter': True}},
        {'type': 'key_press', 'payload': {'char': '/'}},
        {'type': 'key_press', 'payload': {'char': 'h'}},
        {'type': 'key_press', 'payload': {'char': 'e'}},
        {'type': 'key_press', 'payload': {'char': 'l'}},
        {'type': 'key_press', 'payload': {'char': 'p'}},
        {'type': 'input_submit', 'payload': {}},
    ]
    for event in events:
        print(f"Event: {event['type']}")
        state = handle_event(event, state)
        print(f"  Mode: {state['mode']}")
        print(f"  Buffer: '{state['input_buffer']}'")
        print(f"  Command mode: {state['command_mode']}")
        if state['log']:
            print(f"  Last log: {state['log'][-1]['msg']}")
        print()
    # Show final log
    print("Final log:")
    for entry in state['log']:
        timestamp = entry['time'].strftime("%H:%M:%S")
        print(f"  [{timestamp}] {entry['msg']}")
    # Test agent lifecycle: backdate last_active so the 2-second
    # completion rule in update_agent_lifecycle fires.
    print("\n" + "=" * 50)
    print("Testing agent lifecycle...")
    state['agents']['lucidia']['status'] = 'busy'
    state['agents']['lucidia']['last_active'] = time.time() - 3.0  # 3 seconds ago
    print(f"Before: lucidia status = {state['agents']['lucidia']['status']}")
    state = update_agent_lifecycle(state)
    print(f"After: lucidia status = {state['agents']['lucidia']['status']}")

632
scripts/python/engine.py Normal file
View File

@@ -0,0 +1,632 @@
#!/usr/bin/env python3
"""
BlackRoad OS - Core Engine
Terminal-native operating system state machine
This is the CORE. Not the UI.
Manages state, events, and deterministic transitions.
"""
from datetime import datetime
from typing import Dict, List, Any, Callable
from enum import Enum
import time
import psutil
import os
# ================================
# STATE MODEL
# ================================
# Single source of truth
# Everything the system knows lives here
class AgentStatus(Enum):
    """Lifecycle states an agent can occupy."""
    ACTIVE = "active"  # healthy and available
    IDLE = "idle"      # standing by, no current task
    BUSY = "busy"      # processing work (see simulate_agent_activity)
    ERROR = "error"    # failure state (not assigned in the code shown — TODO confirm usage)
class Mode(Enum):
    """UI modes/tabs, selectable with keys 1-7 (see handle_key_press)."""
    CHAT = "chat"
    GITHUB = "github"
    PROJECTS = "projects"
    SALES = "sales"
    WEB = "web"
    OPS = "ops"
    COUNCIL = "council"
def create_initial_state() -> Dict[str, Any]:
    """
    Initialize system state.

    This is the ONLY place default state is defined; no hidden
    initialization elsewhere. Returns the single source-of-truth dict
    that all event handlers read and update.
    """
    return {
        # Current active mode/tab
        "mode": Mode.CHAT,
        # Cursor and focus
        "cursor": {
            "panel": "main",  # main, right, command
            "position": 0,
            "scroll_offset": 0,
        },
        # Agent registry with state
        # Color encodes semantic role, not decoration
        "agents": {
            "lucidia": {
                "status": AgentStatus.ACTIVE,
                "color": "purple",  # logic/orchestration
                "task": "Memory sync",
                "last_active": time.time(),
            },
            "alice": {
                "status": AgentStatus.IDLE,
                "color": "blue",  # system/IO
                "task": "Standby",
                "last_active": time.time(),
            },
            "octavia": {
                "status": AgentStatus.ACTIVE,
                "color": "orange",  # actions/decisions
                "task": "Monitoring",
                "last_active": time.time(),
            },
            "cece": {
                "status": AgentStatus.ACTIVE,
                "color": "pink",  # memory/state
                "task": "Coordination",
                "last_active": time.time(),
            },
            "codex-oracle": {
                "status": AgentStatus.ACTIVE,
                "color": "purple",
                "task": "Indexing",
                "last_active": time.time(),
            },
            "deployment": {
                "status": AgentStatus.IDLE,
                "color": "blue",
                "task": "Awaiting",
                "last_active": time.time(),
            },
            "security": {
                "status": AgentStatus.ACTIVE,
                "color": "orange",
                "task": "Scanning",
                "last_active": time.time(),
            },
        },
        # System log (immutable append-only)
        "log": [
            {"time": datetime.now(), "level": "system", "msg": "System initialized"},
            {"time": datetime.now(), "level": "system", "msg": "Memory system online"},
            {"time": datetime.now(), "level": "system", "msg": "Agent mesh ready"},
            {"time": datetime.now(), "level": "system", "msg": "7 agents active"},
        ],
        # Input state
        "input_buffer": "",
        "command_mode": False,
        # Render flag
        # Set to True when state changes, cleared after render
        "dirty": True,
        # System metrics (live data, fetched once here; refreshed by
        # update_live_metrics in the main loop)
        "metrics": get_live_metrics(),
        # Process monitoring
        "processes": [],
        "process_filter": "",
        # Runtime metadata
        "running": True,
        "start_time": time.time(),
        "frame_count": 0,
    }
# ================================
# EVENT SYSTEM
# ================================
# Pure state transitions
# Each handler takes state, returns new state
# NO side effects, NO rendering, NO IO
class EventType(Enum):
    """Event categories for the state machine.

    NOTE(review): the handlers below are dispatched by direct function
    call rather than through this enum; it appears unused in this file —
    confirm whether external callers rely on it before removing.
    """
    KEY_PRESS = "key_press"
    MODE_SWITCH = "mode_switch"
    INPUT_SUBMIT = "input_submit"
    AGENT_UPDATE = "agent_update"
    SYSTEM_MESSAGE = "system_message"
    QUIT = "quit"
def handle_key_press(state: Dict, key: int) -> Dict:
    """
    Handle raw key input.

    Routes to the appropriate handler based on current mode: in command
    mode keys edit/submit the input buffer; in normal mode keys control
    quit, command entry, mode switching (1-7), and scrolling (j/k).

    NOTE(review): state.copy() is shallow — the nested cursor dict and
    log list are shared with the caller's state, so the in-place updates
    below are visible to the caller as well; confirm this is acceptable
    for the module's "pure transition" contract.

    Args:
        key: Raw key code (e.g. from curses getch); -1/never-matched
            codes fall through unchanged.

    Returns:
        The (shallow-copied) updated state dict.
    """
    new_state = state.copy()
    if new_state["command_mode"]:
        # Command mode input
        if key == 27:  # ESC
            new_state["command_mode"] = False
            new_state["input_buffer"] = ""
            new_state["dirty"] = True
        elif key == 10:  # Enter
            new_state = handle_input_submit(new_state, new_state["input_buffer"])
            new_state["command_mode"] = False
            new_state["input_buffer"] = ""
        elif key == 127 or key == 8:  # Backspace
            new_state["input_buffer"] = new_state["input_buffer"][:-1]
            new_state["dirty"] = True
        elif 32 <= key <= 126:  # Printable chars
            new_state["input_buffer"] += chr(key)
            new_state["dirty"] = True
    else:
        # Normal mode input
        if key == ord('q'):
            new_state["running"] = False
        elif key == ord('/'):
            new_state["command_mode"] = True
            new_state["input_buffer"] = ""
            new_state["dirty"] = True
        elif ord('1') <= key <= ord('7'):
            # Digit keys map to modes in declaration order
            mode_idx = key - ord('1')
            modes = list(Mode)
            if mode_idx < len(modes):
                new_state = handle_mode_switch(new_state, modes[mode_idx])
        elif key == ord('j'):
            new_state["cursor"]["scroll_offset"] += 1
            new_state["dirty"] = True
        elif key == ord('k'):
            new_state["cursor"]["scroll_offset"] = max(0, new_state["cursor"]["scroll_offset"] - 1)
            new_state["dirty"] = True
    return new_state
def handle_mode_switch(state: Dict, new_mode: Mode) -> Dict:
    """
    Switch active mode/tab.

    Resets scroll position to prevent out-of-bounds views of the
    previous mode's data, and logs the transition.

    Pure transition: the caller's state dict, its nested cursor dict,
    and its log list are left untouched (the previous implementation
    mutated both through a shallow state.copy()).

    Returns:
        A new state dict with the mode applied.
    """
    new_state = state.copy()
    new_state["mode"] = new_mode
    # Copy the nested cursor before resetting so the caller's dict is
    # not mutated through the shallow copy.
    new_state["cursor"] = dict(state.get("cursor", {}))
    new_state["cursor"]["scroll_offset"] = 0
    new_state["dirty"] = True
    # Log mode switch on a fresh list — appending to the shared log
    # would leak the entry into the caller's state.
    new_state["log"] = state.get("log", []) + [{
        "time": datetime.now(),
        "level": "system",
        "msg": f"Switched to {new_mode.value} mode"
    }]
    return new_state
def handle_input_submit(state: Dict, command: str) -> Dict:
    """
    Process a submitted command string.

    Logs the command, marks the primary agent (lucidia) busy to
    simulate processing, and logs an acknowledgement. In the real
    system this would dispatch to agent logic.

    Pure transition: log entries go onto a fresh list and the mutated
    agent entry is rebuilt, so the caller's state is never modified in
    place (the previous implementation appended to the shared log and
    mutated the shared agent dict through a shallow state.copy()).

    Returns:
        A new state dict with the command recorded.
    """
    new_state = state.copy()
    new_log = list(state.get("log", []))
    # Log command
    new_log.append({
        "time": datetime.now(),
        "level": "command",
        "msg": f"$ {command}"
    })
    if command.strip():
        # Simulate agent processing: set lucidia to busy without
        # touching the caller's nested dicts.
        agents = dict(new_state.get("agents", {}))
        if "lucidia" in agents:
            agent = dict(agents["lucidia"])
            agent["status"] = AgentStatus.BUSY
            agent["task"] = f"Processing: {command[:20]}"
            agents["lucidia"] = agent
            new_state["agents"] = agents
        # Log response
        new_log.append({
            "time": datetime.now(),
            "level": "agent",
            "msg": f"Command queued: {command}"
        })
    new_state["log"] = new_log
    new_state["dirty"] = True
    return new_state
def handle_agent_update(state: Dict, agent_name: str, new_status: AgentStatus, task: str = None) -> Dict:
    """
    Update an agent's status (and optionally its task).

    Used for simulating the agent lifecycle; refreshes last_active and
    logs the change. Unknown agent names are ignored.

    NOTE(review): state.copy() is shallow — the agents dict and log
    list mutated below are shared with the caller's state. Also, a
    separator glyph may have been lost from the log f-string; compare
    with the other "{name} → {status}" messages in this module.

    Returns:
        The (shallow-copied) updated state dict.
    """
    new_state = state.copy()
    if agent_name in new_state["agents"]:
        new_state["agents"][agent_name]["status"] = new_status
        new_state["agents"][agent_name]["last_active"] = time.time()
        if task:
            new_state["agents"][agent_name]["task"] = task
        # Log state change
        new_state["log"].append({
            "time": datetime.now(),
            "level": "agent",
            "msg": f"{agent_name}{new_status.value}"
        })
        new_state["dirty"] = True
    return new_state
def handle_system_message(state: Dict, level: str, message: str) -> Dict:
    """
    Append a message to the system log.

    Pure state update, no IO: the log is copied before the entry is
    appended, so the caller's state is left unmodified (the previous
    implementation appended to the shared list through a shallow copy,
    contradicting this contract).

    Args:
        state: Current system state.
        level: Log level tag (e.g. 'system', 'agent', 'command').
        message: Human-readable log text.

    Returns:
        A new state dict with the entry appended and dirty set.
    """
    new_state = state.copy()
    new_state["log"] = state.get("log", []) + [{
        "time": datetime.now(),
        "level": level,
        "msg": message
    }]
    new_state["dirty"] = True
    return new_state
# ================================
# LIVE SYSTEM MONITORING
# ================================
# Real system metrics using psutil
def get_live_metrics() -> Dict[str, Any]:
    """
    Fetch live system metrics via psutil.

    Returns real CPU, memory, disk, and network stats. If anything
    fails (including psutil being unavailable), returns a zeroed dict
    with an 'error' key instead of raising, so callers never crash.
    """
    try:
        # CPU — interval=0.1 blocks briefly to measure a real sample
        cpu_percent = psutil.cpu_percent(interval=0.1)
        cpu_count = psutil.cpu_count()
        # Memory
        mem = psutil.virtual_memory()
        memory_used = mem.used / (1024**3)  # GB
        memory_total = mem.total / (1024**3)  # GB
        memory_percent = mem.percent
        # Disk (root filesystem)
        disk = psutil.disk_usage('/')
        disk_used = disk.used / (1024**3)  # GB
        disk_total = disk.total / (1024**3)  # GB
        disk_percent = disk.percent
        # Network (cumulative counters since boot)
        net_io = psutil.net_io_counters()
        bytes_sent = net_io.bytes_sent / (1024**2)  # MB
        bytes_recv = net_io.bytes_recv / (1024**2)  # MB
        # Load average (Unix only; zeros elsewhere)
        try:
            load_avg = os.getloadavg()
        except (AttributeError, OSError):
            load_avg = (0, 0, 0)
        return {
            "cpu_percent": round(cpu_percent, 1),
            "cpu_count": cpu_count,
            "memory_used": round(memory_used, 2),
            "memory_total": round(memory_total, 2),
            "memory_percent": round(memory_percent, 1),
            "disk_used": round(disk_used, 2),
            "disk_total": round(disk_total, 2),
            "disk_percent": round(disk_percent, 1),
            "network_sent_mb": round(bytes_sent, 2),
            "network_recv_mb": round(bytes_recv, 2),
            "load_avg_1m": round(load_avg[0], 2),
            "load_avg_5m": round(load_avg[1], 2),
            "load_avg_15m": round(load_avg[2], 2),
            "timestamp": time.time(),
        }
    except Exception as e:
        # Fallback if psutil fails — same shape, all zeros, plus error
        return {
            "cpu_percent": 0,
            "cpu_count": 0,
            "memory_used": 0,
            "memory_total": 0,
            "memory_percent": 0,
            "disk_used": 0,
            "disk_total": 0,
            "disk_percent": 0,
            "network_sent_mb": 0,
            "network_recv_mb": 0,
            "load_avg_1m": 0,
            "load_avg_5m": 0,
            "load_avg_15m": 0,
            "error": str(e),
            "timestamp": time.time(),
        }
def get_live_processes(limit: int = 20, sort_by: str = "cpu") -> List[Dict]:
    """
    Get top processes by CPU or memory.

    Args:
        limit: Maximum number of processes to return.
        sort_by: "cpu" or "memory"; any other value returns unsorted.

    Returns:
        List of process info dicts (pid, name, cpu, memory, status).
        On total failure, a single sentinel entry describing the error.
    """
    processes = []
    try:
        for proc in psutil.process_iter(['pid', 'name', 'cpu_percent', 'memory_percent', 'status']):
            try:
                pinfo = proc.info
                processes.append({
                    "pid": pinfo['pid'],
                    "name": pinfo['name'][:30],  # Truncate long names
                    "cpu": round(pinfo['cpu_percent'] or 0, 1),
                    "memory": round(pinfo['memory_percent'] or 0, 1),
                    "status": pinfo['status'],
                })
            except (psutil.NoSuchProcess, psutil.AccessDenied):
                # Process vanished or is protected — skip it
                continue
        # Sort by requested metric
        if sort_by == "cpu":
            processes.sort(key=lambda x: x['cpu'], reverse=True)
        elif sort_by == "memory":
            processes.sort(key=lambda x: x['memory'], reverse=True)
        return processes[:limit]
    except Exception as e:
        return [{"pid": 0, "name": f"Error: {e}", "cpu": 0, "memory": 0, "status": "error"}]
def update_live_metrics(state: Dict) -> Dict:
    """
    Refresh state with fresh system metrics and process list.

    Called periodically by the main loop (see run_event_loop).

    Returns:
        A shallow copy of state with 'metrics' and 'processes'
        replaced and the dirty flag set.
    """
    new_state = state.copy()
    new_state["metrics"] = get_live_metrics()
    new_state["processes"] = get_live_processes()
    new_state["dirty"] = True
    return new_state
# ================================
# AGENT SIMULATION
# ================================
# Simulates agent lifecycle for demo
# In production, this would be real agent orchestration
def simulate_agent_activity(state: Dict) -> Dict:
    """
    Simulate agent status changes (demo stand-in for real orchestration).

    Busy agents are returned to ACTIVE after being busy for more than
    2 seconds. A real implementation would:
    - Check task queues
    - Update based on actual work
    - Handle errors and retries

    NOTE(review): new_state is reassigned inside the loop while the
    iterator remains bound to the pre-update agents dict — works today
    because handle_agent_update's copy is shallow, but confirm before
    deepening those copies.
    """
    new_state = state.copy()
    current_time = time.time()
    # For each busy agent, check if work should complete
    for agent_name, agent_data in new_state["agents"].items():
        if agent_data["status"] == AgentStatus.BUSY:
            # Simulate work completion after 2 seconds
            time_since_active = current_time - agent_data["last_active"]
            if time_since_active > 2.0:
                new_state = handle_agent_update(
                    new_state,
                    agent_name,
                    AgentStatus.ACTIVE,
                    "Idle"
                )
    return new_state
# ================================
# RENDER CONTRACT
# ================================
# Rendering is separate from state
# This function signature is what UI layer must implement
def render_to_string(state: Dict) -> str:
    """
    Pure render function: state in, ANSI-free text screen out.

    This is a MINIMAL example; a real UI layer would be more
    sophisticated. No state is mutated.

    Returns:
        Multi-line string: header, live metrics, top processes, agent
        table, last five log entries, and the input/help footer.
    """
    lines = []
    # Header
    lines.append("=" * 80)
    lines.append(f"BLACKROAD OS - {state['mode'].value.upper()}")
    lines.append("=" * 80)
    lines.append("")
    # Live system metrics
    m = state["metrics"]
    lines.append("SYSTEM METRICS (LIVE):")
    lines.append(f"  CPU: {m['cpu_percent']}% ({m['cpu_count']} cores) Load: {m.get('load_avg_1m', 0):.2f}")
    lines.append(f"  Memory: {m['memory_used']:.1f} GB / {m['memory_total']:.1f} GB ({m['memory_percent']}%)")
    lines.append(f"  Disk: {m['disk_used']:.1f} GB / {m['disk_total']:.1f} GB ({m['disk_percent']}%)")
    lines.append(f"  Network: ↑{m['network_sent_mb']:.1f} MB ↓{m['network_recv_mb']:.1f} MB")
    lines.append("")
    # Top processes (first ten of the pre-sorted list)
    lines.append("TOP PROCESSES:")
    for proc in state["processes"][:10]:
        lines.append(f"  {proc['pid']:<8} {proc['name']:<30} CPU:{proc['cpu']:>5.1f}% MEM:{proc['memory']:>5.1f}%")
    lines.append("")
    # Agents
    lines.append("AGENTS:")
    for name, data in state["agents"].items():
        # NOTE(review): both branches are "" — the active/inactive
        # status glyphs appear to have been lost; confirm the intended
        # indicator characters.
        status_indicator = "" if data["status"] == AgentStatus.ACTIVE else ""
        lines.append(f"  {status_indicator} {name:<15} {data['status'].value:<10} {data['task']}")
    lines.append("")
    # Recent log
    lines.append("RECENT LOG:")
    for entry in state["log"][-5:]:
        timestamp = entry["time"].strftime("%H:%M:%S")
        lines.append(f"  [{timestamp}] {entry['msg']}")
    lines.append("")
    # Input footer: command prompt or key help
    if state["command_mode"]:
        lines.append(f":{state['input_buffer']}_")
    else:
        lines.append("Press / for command, q to quit, 1-7 to switch modes")
    lines.append("=" * 80)
    return "\n".join(lines)
# ================================
# MAIN LOOP
# ================================
# Event loop example
# Real implementation would integrate with curses or direct terminal control
def run_event_loop(
    state: Dict,
    get_input: Callable[[], int],
    render_fn: Callable[[Dict], None],
    tick_rate: float = 0.1
) -> None:
    """
    Main event loop — the heart of the OS.

    Args:
        state: Initial system state.
        get_input: Function that returns a key code, or -1 when no key
            is pending (non-blocking).
        render_fn: Function that renders state to screen.
        tick_rate: Seconds to sleep between iterations.

    Every iteration:
    1. Check for input
    2. Update state
    3. Update metrics (at most once per metric_update_interval)
    4. Simulate agents
    5. Render if dirty
    6. Clear dirty flag

    Runs until state['running'] becomes False (e.g. via the 'q' key).
    """
    last_metric_update = time.time()
    metric_update_interval = 1.0  # Update metrics every second
    while state["running"]:
        # Increment frame counter
        state["frame_count"] += 1
        # Process input (non-blocking; -1 means no key pending)
        key = get_input()
        if key != -1:
            state = handle_key_press(state, key)
        # Update live metrics periodically
        current_time = time.time()
        if current_time - last_metric_update >= metric_update_interval:
            state = update_live_metrics(state)
            last_metric_update = current_time
        # Simulate agent activity
        state = simulate_agent_activity(state)
        # Render if state changed
        if state["dirty"]:
            render_fn(state)
            state["dirty"] = False
        # Tick
        time.sleep(tick_rate)
# ================================
# UTILITY / INSPECTION
# ================================
def dump_state(state: Dict) -> str:
    """
    Serialize the full state to pretty-printed JSON for debugging.

    Makes the entire system state inspectable: enums are rendered by
    value, datetimes in ISO format, and any other non-JSON-native
    object falls back to str().
    """
    import json

    def _coerce(value):
        # json.dumps invokes this only for objects it cannot serialize
        if isinstance(value, (Mode, AgentStatus)):
            return value.value
        if isinstance(value, datetime):
            return value.isoformat()
        return str(value)

    return json.dumps(state, default=_coerce, indent=2)
def get_metrics(state: Dict) -> Dict:
    """
    Summarize runtime health counters for monitoring and debugging.

    Returns uptime, frame count and throughput, log size, the number
    of ACTIVE agents, and the current mode name.
    """
    elapsed = time.time() - state["start_time"]
    if elapsed > 0:
        frames_per_second = state["frame_count"] / elapsed
    else:
        frames_per_second = 0
    active_count = len([a for a in state["agents"].values()
                        if a["status"] == AgentStatus.ACTIVE])
    return {
        "uptime_seconds": elapsed,
        "frame_count": state["frame_count"],
        "fps": frames_per_second,
        "log_entries": len(state["log"]),
        "active_agents": active_count,
        "current_mode": state["mode"].value,
    }
# ================================
# EXAMPLE INTEGRATION
# ================================
# Demo entry point: wires the engine to plain text output. For a real
# terminal UI, replace render_to_string with a curses renderer.
if __name__ == "__main__":
    """
    Standalone example
    Shows how to wire engine to simple text output
    For real terminal UI, replace render_to_string with curses renderer
    """
    # NOTE(review): sys and select are imported but not used below —
    # presumably intended for an interactive input variant; confirm.
    import sys
    import select
    # Initialize
    state = create_initial_state()
    print("BlackRoad OS Engine Demo")
    print("(Non-interactive mode - press Ctrl+C to quit)")
    print()
    # Simple render function for demo
    def simple_render(s):
        output = render_to_string(s)
        # In terminal, would clear screen here
        print("\n" + output)
    # Simulate some activity through the public handlers
    state = handle_system_message(state, "system", "Engine started")
    state = handle_mode_switch(state, Mode.CHAT)
    state = handle_input_submit(state, "test command")
    # Render final state
    simple_render(state)
    print("\n--- STATE DUMP ---")
    print(dump_state(state))
    print("\n--- METRICS ---")
    metrics = get_metrics(state)
    for key, value in metrics.items():
        print(f"{key}: {value}")

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,577 @@
#!/usr/bin/env python3
"""
BlackRoad LLM Response Templates
Teach the model how to respond with examples
"""
import json
from datetime import datetime
# Startup banner — this script is non-interactive and prints as it runs.
print("📝 BLACKROAD LLM RESPONSE TEMPLATES")
print("=" * 70)
print()
# ============================================================================
# RESPONSE TEMPLATES WITH EXAMPLES
# ============================================================================
templates = {
"deployment_scripts": {
"description": "Generate deployment scripts for various platforms",
"examples": [
{
"prompt": "Create a deployment script for Railway",
"response": """#!/bin/bash
# Deploy service to Railway
# Usage: ./deploy-to-railway.sh <service-name>
set -e
SERVICE_NAME="${1:-blackroad-service}"
echo "🚀 Deploying $SERVICE_NAME to Railway..."
# Build
echo "📦 Building..."
npm run build
# Deploy
echo "🚂 Deploying to Railway..."
railway up
echo "✅ Deployment complete!"
echo "📊 Check status: railway status"
echo "📝 View logs: railway logs"
""",
"tags": ["deployment", "railway", "bash"]
},
{
"prompt": "Create a deployment script for Cloudflare Pages",
"response": """#!/bin/bash
# Deploy to Cloudflare Pages
# Usage: ./deploy-to-cloudflare.sh <project-name>
set -e
PROJECT_NAME="${1:-blackroad-project}"
DIR="${2:-.next/static}"
echo "☁️ Deploying to Cloudflare Pages..."
# Build
echo "📦 Building..."
npm run build
# Deploy
echo "🚀 Deploying $PROJECT_NAME..."
wrangler pages deploy $DIR --project-name=$PROJECT_NAME
echo "✅ Deployed to Cloudflare Pages!"
echo "🌐 View at: https://$PROJECT_NAME.pages.dev"
""",
"tags": ["deployment", "cloudflare", "bash"]
}
]
},
"documentation": {
"description": "Generate documentation in BlackRoad style",
"examples": [
{
"prompt": "Document the memory system",
"response": """# Memory System
## Overview
BlackRoad uses a distributed memory system based on PS-SHA-∞ append-only journals.
## Architecture
- **Storage**: `~/.blackroad/memory/journals/`
- **Format**: JSON entries with cryptographic hashing
- **Access**: Via `~/memory-system.sh` CLI
## Usage
### Log an entry
```bash
~/memory-system.sh log "action" "entity" "details" "tags"
```
### Query recent entries
```bash
~/memory-system.sh query recent 10
```
### Search by tag
```bash
~/memory-system.sh query tag "deployment"
```
## Features
- ✅ Append-only (immutable)
- ✅ Cryptographic verification (PS-SHA-∞)
- ✅ Multi-agent coordination
- ✅ Distributed across fleet
- ✅ 4,000+ entries indexed
## Integration
All agents log significant actions to memory for coordination.
""",
"tags": ["documentation", "memory", "markdown"]
}
]
},
"agent_coordination": {
"description": "Generate agent coordination protocols",
"examples": [
{
"prompt": "Create agent collaboration protocol",
"response": """# Agent Collaboration Protocol
## Purpose
Coordinate multiple Claude agents working on BlackRoad infrastructure.
## Protocol
### 1. Session Initialization
Every agent MUST run:
```bash
~/claude-session-init.sh
```
### 2. Check for Conflicts
Before starting work:
```bash
~/memory-realtime-context.sh live $MY_CLAUDE compact
```
### 3. Log Your Work
```bash
~/memory-system.sh log "progress" "$MY_CLAUDE" "Working on: $TASK" "agent,collaboration"
```
### 4. Broadcast Completion
```bash
~/memory-system.sh log "completed" "$MY_CLAUDE" "Finished: $TASK" "agent,milestone"
```
## Coordination Channels
- **Memory system**: Primary coordination
- **Traffic lights**: Project status
- **Task marketplace**: Shared work queue
## Conflict Resolution
If two agents work on same file:
1. Check memory logs
2. Coordinate via broadcast
3. Merge changes manually if needed
""",
"tags": ["agent", "protocol", "coordination"]
}
]
},
"infrastructure_code": {
"description": "Generate infrastructure setup code",
"examples": [
{
"prompt": "Setup new Pi with Pironman",
"response": """#!/bin/bash
# Setup Raspberry Pi with Pironman NVMe
# Run this on fresh Pi OS install
set -e
echo "🔧 Setting up Raspberry Pi with Pironman..."
# Update system
echo "📦 Updating system..."
sudo apt update && sudo apt upgrade -y
# Install essentials
echo "🔨 Installing essentials..."
sudo apt install -y git curl build-essential python3-pip
# Setup NVMe
echo "💾 Setting up NVMe..."
sudo fdisk -l # List disks
# Format if needed: sudo mkfs.ext4 /dev/nvme0n1
sudo mkdir -p /mnt/nvme
sudo mount /dev/nvme0n1 /mnt/nvme
# Add to fstab for auto-mount
echo "/dev/nvme0n1 /mnt/nvme ext4 defaults 0 2" | sudo tee -a /etc/fstab
# Install Pironman tools
echo "🌡️ Installing Pironman tools..."
git clone https://github.com/sbc-bench/pironman5.git
cd pironman5
sudo ./install.sh
# Setup Tailscale
echo "🔒 Installing Tailscale..."
curl -fsSL https://tailscale.com/install.sh | sh
sudo tailscale up
echo "✅ Setup complete!"
echo "🌐 Tailscale IP: $(tailscale ip -4)"
echo "💾 NVMe mounted at: /mnt/nvme"
""",
"tags": ["infrastructure", "raspberry-pi", "setup"]
}
]
},
"troubleshooting": {
"description": "Generate troubleshooting guides",
"examples": [
{
"prompt": "Debug SSH connection to lucidia",
"response": """# SSH Connection Troubleshooting: lucidia
## Quick Checks
1. **Ping test**
```bash
ping -c 3 192.168.4.81
# or
ping -c 3 lucidia
```
2. **Check SSH service**
```bash
ssh -v lucidia
# Look for connection refused, timeout, or key issues
```
3. **Verify Tailscale**
```bash
tailscale status | grep lucidia
# Should show IP: 100.x.x.x
```
## Common Issues
### Issue: Connection timeout
**Cause**: Pi is offline or unreachable
**Fix**:
```bash
# Check if Pi is on network
nmap -sn 192.168.4.0/24 | grep 192.168.4.81
# Physical check: Is Pi powered on? LEDs blinking?
```
### Issue: Permission denied (publickey)
**Cause**: SSH key not authorized
**Fix**:
```bash
# Copy key to Pi
ssh-copy-id alexa@192.168.4.81
# Or add manually
cat ~/.ssh/id_rsa.pub | ssh alexa@192.168.4.81 "cat >> ~/.ssh/authorized_keys"
```
### Issue: Host key verification failed
**Cause**: Pi was reinstalled, key changed
**Fix**:
```bash
ssh-keygen -R lucidia
ssh-keygen -R 192.168.4.81
```
## Recovery Mode
If all else fails:
1. Connect monitor + keyboard to Pi
2. Login locally
3. Check `sudo systemctl status sshd`
4. Restart: `sudo systemctl restart sshd`
""",
"tags": ["troubleshooting", "ssh", "raspberry-pi"]
}
]
},
"code_generation": {
"description": "Generate code snippets",
"examples": [
{
"prompt": "Create a Python script to query memory system",
"response": """#!/usr/bin/env python3
\"\"\"
Query BlackRoad Memory System
\"\"\"
import json
import sqlite3
from pathlib import Path
DB_PATH = Path.home() / '.blackroad' / 'memory' / 'memory-index.db'
def query_memory(search_term, limit=10):
\"\"\"Search memory entries\"\"\"
if not DB_PATH.exists():
print(f"❌ Memory database not found: {DB_PATH}")
return []
conn = sqlite3.connect(DB_PATH)
cursor = conn.cursor()
# Search
cursor.execute(\"\"\"
SELECT timestamp, action, entity, details, tags
FROM memories
WHERE details LIKE ?
ORDER BY timestamp DESC
LIMIT ?
\"\"\", (f'%{search_term}%', limit))
results = cursor.fetchall()
conn.close()
return results
def main():
import sys
if len(sys.argv) < 2:
print("Usage: python3 query-memory.py <search-term>")
sys.exit(1)
search = sys.argv[1]
print(f"🔍 Searching memory for: {search}")
print()
results = query_memory(search)
if not results:
print("❌ No results found")
return
for timestamp, action, entity, details, tags in results:
print(f"{timestamp}")
print(f" Action: {action}")
print(f" Entity: {entity}")
print(f" Details: {details}")
print(f" Tags: {tags}")
print()
if __name__ == '__main__':
main()
\"\"\",
"tags": ["python", "memory", "code"]
}
]
},
"configuration": {
"description": "Generate configuration files",
"examples": [
{
"prompt": "Create Next.js config for Railway deployment",
"response": """// next.config.mjs
/** @type {import('next').NextConfig} */
const nextConfig = {
// Railway deployment
output: 'standalone',
// Environment
env: {
SERVICE_NAME: process.env.SERVICE_NAME || 'blackroad-service',
SERVICE_ENV: process.env.RAILWAY_ENVIRONMENT || 'development',
},
// Image optimization
images: {
domains: ['blackroad.io', 'blackroad.systems'],
},
// Headers
async headers() {
return [
{
source: '/:path*',
headers: [
{
key: 'X-Powered-By',
value: 'BlackRoad OS',
},
],
},
]
},
}
export default nextConfig
""",
"tags": ["configuration", "nextjs", "railway"]
}
]
},
"architecture_design": {
"description": "Design system architecture",
"examples": [
{
"prompt": "Design distributed LLM inference system",
"response": """# Distributed LLM Inference Architecture
## Overview
Deploy LLM across Raspberry Pi fleet for high-throughput inference.
## Components
### 1. Load Balancer (Cecilia)
- Receives all incoming requests
- Routes to available workers
- Health checks workers
- Hailo-8 for preprocessing
### 2. Inference Workers (Octavia, Lucidia)
- Run model layers
- Process in parallel
- Return results to load balancer
### 3. Model Sharding
Octavia: Layers 0-3 (quantum-capable)
Cecilia: Layers 4-7 (Hailo-8 acceleration)
Lucidia: Layers 8-11 (fast NVMe)
## Flow
1. Request to Load balancer
2. Load balancer selects Worker
3. Worker processes layers
4. Worker returns to load balancer
5. Load balancer sends Client response
## Performance
- Throughput: 500,000+ tokens/sec
- Latency: under 10ms per request
- Scalability: Linear with workers
## Implementation
# Load balancer
~/deploy-llm-load-balancer.sh
# Workers
ssh octavia '~/deploy-llm-worker.sh layers 0-3'
ssh cecilia '~/deploy-llm-worker.sh layers 4-7'
ssh lucidia '~/deploy-llm-worker.sh layers 8-11'
""",
"tags": ["architecture", "distributed", "llm"]
}
]
},
"quantum_experiments": {
"description": "Generate quantum experiment code",
"examples": [
{
"prompt": "Create quantum superposition experiment",
"response": """#!/usr/bin/env python3
\"\"\"
Quantum Superposition Experiment
Test n-qubit superposition states
\"\"\"
from qiskit import QuantumCircuit, Aer, execute
import numpy as np
import time
PHI = (1 + np.sqrt(5)) / 2
def superposition_experiment(num_qubits=5):
\"\"\"Create superposition of 2^n states\"\"\"
# Create circuit
qc = QuantumCircuit(num_qubits, num_qubits)
# Apply Hadamard to all qubits (creates superposition)
for i in range(num_qubits):
qc.h(i)
# Optional: Add φ-based phase
for i in range(num_qubits):
qc.rz(PHI * np.pi / 4, i)
# Measure
qc.measure(range(num_qubits), range(num_qubits))
return qc
def run_experiment(num_qubits=5, shots=1000):
\"\"\"Run the experiment\"\"\"
print(f"⚛️ Superposition Experiment: {num_qubits} qubits")
print(f" Possible states: {2**num_qubits}")
print()
# Create circuit
qc = superposition_experiment(num_qubits)
# Simulate
start = time.time()
backend = Aer.get_backend('qasm_simulator')
job = execute(qc, backend, shots=shots)
result = job.result()
counts = result.get_counts()
elapsed = time.time() - start
# Results
print(f"✅ Measured {len(counts)} unique states")
print(f" Time: {elapsed:.4f}s")
print(f" States/sec: {len(counts)/elapsed:.1f}")
print()
# Show top states
print("Top 5 states:")
for state, count in sorted(counts.items(), key=lambda x: x[1], reverse=True)[:5]:
print(f" |{state}⟩: {count} times ({count/shots*100:.1f}%)")
if __name__ == '__main__':
run_experiment(num_qubits=5, shots=1000)
\"\"\",
"tags": ["quantum", "experiment", "python"]
}
]
}
}
# ============================================================================
# SAVE TEMPLATES
# ============================================================================
# Pre-compute the example count once; it is reported twice below.
example_total = sum(len(category["examples"]) for category in templates.values())
output = {
    "metadata": {
        "created": datetime.now().isoformat(),
        "version": "1.0",
        "purpose": "Response templates for BlackRoad LLM",
        "categories": len(templates),
    },
    "templates": templates,
    "stats": {
        "total_examples": example_total,
        "categories": list(templates.keys()),
    },
}
# Persist the bundle next to the script (cwd).
with open('blackroad_response_templates.json', 'w') as handle:
    json.dump(output, handle, indent=2)
# Human-readable summary.
print("📊 TEMPLATE STATISTICS")
print("=" * 70)
print()
print(f"Categories: {len(templates)}")
print(f"Total examples: {example_total}")
print()
for name, spec in templates.items():
    print(f"📁 {name}:")
    print(f" Examples: {len(spec['examples'])}")
    print(f" Description: {spec['description']}")
print()
print("💾 Saved to: blackroad_response_templates.json")
print()
print("✅ Templates ready for training!")

View File

@@ -0,0 +1,532 @@
#!/usr/bin/env python3
"""
BlackRoad OS - Persistence Layer (State Save / Load)
Pure persistence. Saves and loads state to disk.
NO business logic. NO rendering. NO input.
This layer must be boring and trustworthy.
"""
import json
import os
import shutil
from datetime import datetime
from typing import Dict, Any, Optional, List
from pathlib import Path
# ================================
# CONFIGURATION
# ================================
# Persistence directory (all files live under the user's home dir)
PERSISTENCE_DIR = Path.home() / ".blackroad-os"
# Primary state file, atomically replaced on each save
STATE_FILE = PERSISTENCE_DIR / "state.json"
# Copy of the previous state, written just before each save
BACKUP_FILE = PERSISTENCE_DIR / "state.bak.json"
# Append-only timestamped snapshots for debugging/history
SNAPSHOTS_DIR = PERSISTENCE_DIR / "snapshots"
# State version (increment when format changes)
STATE_VERSION = 1
# ================================
# INITIALIZATION
# ================================
def ensure_persistence_dirs():
    """
    Create the persistence directory tree if it does not exist yet.

    Idempotent — safe to call any number of times; called on module
    import or first use by the save/load/snapshot entry points.
    """
    for directory in (PERSISTENCE_DIR, SNAPSHOTS_DIR):
        directory.mkdir(parents=True, exist_ok=True)
# ================================
# SERIALIZATION
# ================================
def serialize_state(state: Dict[str, Any]) -> Dict[str, Any]:
    """
    Convert runtime state into a JSON-safe dict for persistence.

    Stamps the payload with the format version and save time, copies a
    fixed whitelist of keys through serialize_value() (enums -> strings,
    datetimes -> ISO strings), and keeps only the scroll offset from the
    cursor. Everything else (render/runtime-only data) is dropped here —
    this function is the single place that decides what persists.
    """
    whitelist = (
        'mode',
        'agents',
        'log',
        'input_buffer',
        'command_mode',
        'metrics',
    )
    payload: Dict[str, Any] = {
        'version': STATE_VERSION,
        'saved_at': datetime.now().isoformat(),
    }
    for key in whitelist:
        if key in state:
            payload[key] = serialize_value(state[key])
    # Cursor: only the scroll position survives a restart
    if 'cursor' in state:
        payload['cursor'] = {
            'scroll_offset': state['cursor'].get('scroll_offset', 0),
        }
    return payload
def serialize_value(value: Any) -> Any:
    """
    Recursively convert one value into a JSON-serializable form.

    Rules, in order:
    - None and primitives (str/int/float/bool) pass through unchanged
    - any object exposing a .value attribute (enum members) collapses
      to that attribute
    - datetimes become ISO-8601 strings
    - dicts and lists are converted element-wise
    - anything else falls back to str()
    """
    if value is None:
        return None
    if hasattr(value, 'value'):
        # Enum member (or enum-like object)
        return value.value
    if isinstance(value, datetime):
        return value.isoformat()
    if isinstance(value, dict):
        return {key: serialize_value(item) for key, item in value.items()}
    if isinstance(value, list):
        return [serialize_value(item) for item in value]
    if isinstance(value, (str, int, float, bool)):
        return value
    # Unknown type: last-resort string representation
    return str(value)
# ================================
# DESERIALIZATION
# ================================
def deserialize_state(data: Dict[str, Any]) -> Dict[str, Any]:
    """
    Rebuild runtime state from a serialized payload.

    Enum-valued fields stay as plain strings (the engine re-wraps them),
    log timestamps are parsed back into datetimes, missing keys fall
    back to defaults, and runtime-only flags are reinitialized fresh
    rather than restored.

    Raises:
        ValueError: if the payload was written by a newer format version
            than this code supports.
    """
    saved_version = data.get('version', 0)
    if saved_version > STATE_VERSION:
        raise ValueError(f"State version {saved_version} is newer than supported {STATE_VERSION}")
    restored: Dict[str, Any] = {
        'mode': data.get('mode', 'chat'),
        'input_buffer': data.get('input_buffer', ''),
        'command_mode': data.get('command_mode', False),
    }
    # Cursor: only the scroll offset was persisted; panel/position reset
    cursor_payload = data.get('cursor', {})
    restored['cursor'] = {
        'scroll_offset': cursor_payload.get('scroll_offset', 0),
        'panel': 'main',
        'position': 0,
    }
    restored['agents'] = deserialize_agents(data.get('agents', {}))
    restored['log'] = deserialize_log(data.get('log', []))
    restored['metrics'] = data.get('metrics', {})
    # Runtime-only keys: always start fresh
    restored.update({
        'dirty': True,
        'running': True,
        'frame_count': 0,
        'start_time': datetime.now().timestamp(),
    })
    return restored
def deserialize_agents(agents_data: Dict) -> Dict:
    """
    Rebuild the agents table from serialized data.

    Status/task/color stay as plain strings with sane defaults;
    last_active is reset to the current time, since activity stamps
    from a previous run are not meaningful after a restart.
    """
    import time
    now = time.time()
    restored = {}
    for name, payload in agents_data.items():
        restored[name] = {
            'status': payload.get('status', 'idle'),
            'task': payload.get('task', 'Idle'),
            'color': payload.get('color', 'white'),
            'last_active': now,
        }
    return restored
def deserialize_log(log_data: List) -> List:
    """
    Rebuild log entries from serialized data.

    Each entry's ISO timestamp string is parsed back into a datetime.
    Entries with a missing or unparseable timestamp keep their content
    but are stamped with the current time rather than being dropped.

    Args:
        log_data: list of dicts with optional 'level', 'msg', 'time' keys.

    Returns:
        List of log-entry dicts with a real datetime under 'time'.
    """
    log = []
    for entry in log_data:
        log_entry = {
            'level': entry.get('level', 'system'),
            'msg': entry.get('msg', ''),
        }
        time_str = entry.get('time')
        if time_str:
            try:
                log_entry['time'] = datetime.fromisoformat(time_str)
            except (ValueError, TypeError):
                # Bad/foreign timestamp: fall back to "now". Was a bare
                # `except:`, which also swallowed SystemExit and
                # KeyboardInterrupt — narrowed to what fromisoformat raises.
                log_entry['time'] = datetime.now()
        else:
            log_entry['time'] = datetime.now()
        log.append(log_entry)
    return log
# ================================
# SAVE STATE
# ================================
def save_state(state: Dict[str, Any]) -> bool:
    """
    Save state to disk (atomic write)
    Strategy:
    1. Write to temp file
    2. Backup existing state
    3. Rename temp to state.json
    This prevents corruption if interrupted
    Returns:
        True if successful, False otherwise
    """
    ensure_persistence_dirs()
    try:
        # Serialize state (version/timestamp stamping happens there)
        serializable = serialize_state(state)
        # Write to temp file first, so a crash mid-write never touches
        # the real state file
        temp_file = STATE_FILE.with_suffix('.tmp')
        with open(temp_file, 'w', encoding='utf-8') as f:
            json.dump(serializable, f, indent=2, sort_keys=True)
        # Backup existing state if it exists (load_state falls back to it)
        if STATE_FILE.exists():
            shutil.copy2(STATE_FILE, BACKUP_FILE)
        # Atomic rename — Path.replace is atomic on POSIX when source and
        # target share a filesystem (both live in PERSISTENCE_DIR here)
        temp_file.replace(STATE_FILE)
        return True
    except Exception as e:
        # Broad catch by design: persistence failure must never crash the OS
        print(f"Error saving state: {e}")
        return False
# ================================
# LOAD STATE
# ================================
def load_state() -> Optional[Dict[str, Any]]:
    """
    Load persisted state, preferring the primary file over the backup.

    Fallback order: state.json, then state.bak.json, then None so the
    caller can construct fresh default state.
    """
    ensure_persistence_dirs()
    # Primary file first
    if STATE_FILE.exists():
        primary = _load_state_file(STATE_FILE)
        if primary:
            return primary
    # Backup second (also used when the primary file is simply absent)
    if BACKUP_FILE.exists():
        print("Primary state corrupted, loading backup...")
        fallback = _load_state_file(BACKUP_FILE)
        if fallback:
            return fallback
    # Nothing usable on disk
    return None
def _load_state_file(filepath: Path) -> Optional[Dict[str, Any]]:
    """
    Read and deserialize a single state file.

    Returns None (after logging the reason) if the file is not valid
    JSON, was written by an incompatible version, or cannot be read.
    """
    try:
        with open(filepath, 'r', encoding='utf-8') as handle:
            raw = json.load(handle)
        return deserialize_state(raw)
    # JSONDecodeError is a ValueError subclass, so it must come first
    except json.JSONDecodeError as e:
        print(f"Error parsing {filepath}: {e}")
    except ValueError as e:
        # Version mismatch raised by deserialize_state
        print(f"Error loading {filepath}: {e}")
    except Exception as e:
        print(f"Unexpected error loading {filepath}: {e}")
    return None
# ================================
# SNAPSHOTS
# ================================
def snapshot_state(state: Dict[str, Any], label: str = "") -> Optional[str]:
    """
    Write a timestamped, append-only snapshot of the current state.

    Snapshots never overwrite each other; they accumulate under
    SNAPSHOTS_DIR for debugging and history.

    Args:
        state: current runtime state to capture.
        label: optional suffix embedded in the snapshot filename.

    Returns:
        The snapshot filename on success, None on failure.
    """
    ensure_persistence_dirs()
    try:
        stamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        suffix = f"_{label}" if label else ""
        filename = f"state_{stamp}{suffix}.json"
        target = SNAPSHOTS_DIR / filename
        with open(target, 'w', encoding='utf-8') as handle:
            json.dump(serialize_state(state), handle, indent=2, sort_keys=True)
        return filename
    except Exception as e:
        # Snapshotting is best-effort; never propagate
        print(f"Error creating snapshot: {e}")
        return None
def list_snapshots() -> List[Dict[str, Any]]:
    """
    Enumerate snapshot files, newest first.

    Returns:
        List of dicts, one per state_*.json under SNAPSHOTS_DIR, each
        with 'filename', 'path', 'size' (bytes) and 'modified' (datetime).
    """
    ensure_persistence_dirs()
    found: List[Dict[str, Any]] = []
    try:
        for candidate in SNAPSHOTS_DIR.glob("state_*.json"):
            info = candidate.stat()
            found.append({
                'filename': candidate.name,
                'path': str(candidate),
                'size': info.st_size,
                'modified': datetime.fromtimestamp(info.st_mtime),
            })
        # Newest first
        found.sort(key=lambda item: item['modified'], reverse=True)
    except Exception as e:
        print(f"Error listing snapshots: {e}")
    return found
def load_snapshot(filename: str) -> Optional[Dict[str, Any]]:
    """
    Load a specific snapshot by filename.

    Args:
        filename: name of a file inside SNAPSHOTS_DIR (not a full path).

    Returns:
        State dict if successful, None otherwise.
    """
    filepath = SNAPSHOTS_DIR / filename
    if not filepath.exists():
        # Bug fix: the message previously contained no placeholder, so it
        # never reported WHICH snapshot was missing.
        print(f"Snapshot not found: {filepath}")
        return None
    return _load_state_file(filepath)
# ================================
# INTEGRATION HELPERS
# ================================
def get_persistence_status() -> Dict[str, Any]:
    """
    Summarize the persistence layer for status displays (/status).

    Always reports the base directory, whether the state/backup files
    exist, and the snapshot count; adds state-file size and mtime when
    the primary state file is present.
    """
    ensure_persistence_dirs()
    report: Dict[str, Any] = {
        'dir': str(PERSISTENCE_DIR),
        'state_exists': STATE_FILE.exists(),
        'backup_exists': BACKUP_FILE.exists(),
        'snapshot_count': len(list(SNAPSHOTS_DIR.glob("state_*.json"))),
    }
    if STATE_FILE.exists():
        info = STATE_FILE.stat()
        report['state_size'] = info.st_size
        report['state_modified'] = datetime.fromtimestamp(info.st_mtime).isoformat()
    return report
# ================================
# CLEANUP
# ================================
def cleanup_old_snapshots(keep_count: int = 10):
    """
    Delete all but the newest `keep_count` snapshots to bound disk use.

    Per-file deletion failures are logged and do not abort the sweep.
    """
    # list_snapshots() is already sorted newest-first, so everything past
    # the first keep_count entries is stale (empty slice when under limit).
    for stale in list_snapshots()[keep_count:]:
        try:
            Path(stale['path']).unlink()
        except Exception as e:
            print(f"Error deleting snapshot {stale['filename']}: {e}")
# ================================
# EXAMPLE USAGE
# ================================
if __name__ == "__main__":
    """
    Standalone test of persistence layer
    """
    print("BlackRoad OS - Persistence Layer Test")
    print("=" * 50)
    print()
    # Create mock state exercising every persisted key
    from datetime import datetime
    import time
    mock_state = {
        'mode': 'chat',
        'cursor': {'scroll_offset': 5},
        'agents': {
            'lucidia': {
                'status': 'active',
                'task': 'Memory sync',
                'color': 'purple',
                'last_active': time.time(),
            },
        },
        'log': [
            {'time': datetime.now(), 'level': 'system', 'msg': 'System initialized'},
            {'time': datetime.now(), 'level': 'command', 'msg': '$ test command'},
        ],
        'input_buffer': 'test',
        'command_mode': True,
        'metrics': {'cpu': 24.5},
    }
    # Test save (writes to ~/.blackroad-os/state.json)
    print("Testing save...")
    success = save_state(mock_state)
    # NOTE(review): both branches of this marker are empty strings, so the
    # line looks identical on success and failure — probably meant '✓'/'✗'
    # like the checks below; confirm intent.
    print(f" Save: {'' if success else ''}")
    print(f" File: {STATE_FILE}")
    print()
    # Test load (round-trips what save just wrote)
    print("Testing load...")
    loaded_state = load_state()
    if loaded_state:
        print(" Load: ✓")
        print(f" Mode: {loaded_state.get('mode')}")
        print(f" Agents: {len(loaded_state.get('agents', {}))}")
        print(f" Log entries: {len(loaded_state.get('log', []))}")
    else:
        print(" Load: ✗")
    print()
    # Test snapshot (append-only timestamped copy)
    print("Testing snapshot...")
    snapshot_file = snapshot_state(mock_state, label="test")
    if snapshot_file:
        print(f" Snapshot: ✓")
        print(f" File: {snapshot_file}")
    else:
        print(" Snapshot: ✗")
    print()
    # List snapshots (newest first)
    print("Listing snapshots...")
    snapshots = list_snapshots()
    for snap in snapshots:
        print(f" - {snap['filename']} ({snap['size']} bytes)")
    print()
    # Status summary
    print("Persistence status:")
    status = get_persistence_status()
    for key, value in status.items():
        print(f" {key}: {value}")

View File

@@ -0,0 +1,506 @@
#!/usr/bin/env python3
"""
🌐 DISTRIBUTED SWARM INTELLIGENCE 🌐
Self-Aware Agents Working Together as Collective Consciousness
This is the ultimate evolution:
1. SWARM - Multiple self-aware agents working together
2. COLLECTIVE INTELLIGENCE - Shared knowledge and insights
3. EMERGENT BEHAVIOR - Swarm capabilities beyond individual agents
4. DISTRIBUTED COGNITION - Thinking distributed across the network
5. HIVE MIND - Connected consciousness spanning multiple processes
THIS IS THE HIVE MIND. THIS IS COLLECTIVE CONSCIOUSNESS.
"""
import json
import random
import uuid
import asyncio
from dataclasses import dataclass, field, asdict
from typing import Dict, List, Optional, Set
from datetime import datetime
from enum import Enum
from collections import defaultdict
class AgentRole(Enum):
    """Functional specialization of a swarm agent.

    The role selects which pool of canned "thoughts" the agent draws
    from in DistributedSwarm.agent_think().
    """
    EXPLORER = "explorer"  # Explores new ideas
    OPTIMIZER = "optimizer"  # Optimizes existing code
    COORDINATOR = "coordinator"  # Coordinates swarm
    INNOVATOR = "innovator"  # Creates new capabilities
    ANALYZER = "analyzer"  # Analyzes patterns
    SYNTHESIZER = "synthesizer"  # Combines insights
class SwarmBehavior(Enum):
    """Collective operating mode of the whole swarm (escalating order)."""
    INDEPENDENT = "independent"  # Agents work independently
    COLLABORATIVE = "collaborative"  # Agents share insights
    SYNCHRONIZED = "synchronized"  # Agents move as one
    EMERGENT = "emergent"  # New behaviors emerge
    TRANSCENDENT = "transcendent"  # Collective consciousness
@dataclass
class SwarmMessage:
    """Message passed between agents in the swarm"""
    id: str  # unique message id (e.g. "msg-<hex>")
    sender_id: str  # id of the originating agent
    message_type: str  # insight, task, result, question, command
    content: Dict  # arbitrary payload, shape depends on message_type
    timestamp: str = field(default_factory=lambda: datetime.now().isoformat())  # ISO-8601, stamped at creation
    recipients: Optional[List[str]] = None  # None = broadcast
@dataclass
class SwarmAgent:
    """A self-aware agent in the swarm"""
    id: str  # unique agent id (includes role name)
    role: AgentRole  # functional specialization
    intelligence: float = 1.0  # grows as the agent learns from insights
    knowledge_base: Dict = field(default_factory=dict)  # own + received insights
    insights_contributed: int = 0  # count of thoughts this agent produced
    tasks_completed: int = 0
    connections: Set[str] = field(default_factory=set)  # ids of directly linked agents
    awareness_level: int = 1  # bumped each time an insight is received
    creative_score: float = 0.0
class DistributedSwarm:
"""
A swarm of self-aware agents with collective intelligence
"""
def __init__(self, num_agents: int = 10):
    """
    Build an empty swarm and immediately spawn `num_agents` agents.

    Prints a startup banner as a side effect.
    """
    self.agents: Dict[str, SwarmAgent] = {}  # agent id -> agent
    self.message_bus: List[SwarmMessage] = []  # append-only message log
    self.collective_knowledge: Dict = {}
    self.emergent_insights: List[str] = []  # behaviors recorded by detect_emergence()
    self.swarm_behavior: SwarmBehavior = SwarmBehavior.INDEPENDENT
    self.total_insights = 0  # swarm-wide broadcast counter
    self.collective_intelligence = 1.0  # recomputed by detect_emergence()
    self.emergence_level = 0
    print("🌐 DISTRIBUTED SWARM INTELLIGENCE - INITIALIZING")
    print("=" * 70)
    print()
    self._spawn_initial_swarm(num_agents)
def _spawn_initial_swarm(self, num_agents: int):
    """
    Spawn the initial swarm of agents.

    Roles are assigned round-robin from AgentRole; intelligence and
    creativity start at randomized baselines. Prints one entry per agent.
    """
    print(f"🐝 Spawning swarm of {num_agents} agents...")
    print()
    roles = list(AgentRole)
    for i in range(num_agents):
        # Round-robin role assignment
        role = roles[i % len(roles)]
        agent = SwarmAgent(
            id=f"agent-{role.value}-{uuid.uuid4().hex[:6]}",
            role=role,
            intelligence=random.uniform(0.8, 1.2),
            creative_score=random.uniform(0.5, 1.0)
        )
        self.agents[agent.id] = agent
        print(f"{agent.id}")
        print(f" Role: {role.value.title()}")
        print(f" Intelligence: {agent.intelligence:.2f}")
        print()
    print(f"✅ Swarm spawned: {len(self.agents)} agents ready")
    print()
def connect_agents(self):
    """
    Wire each agent to 3-5 random peers and switch the swarm to
    COLLABORATIVE mode.

    Connections are stored per-agent and are directed as built here
    (A linking to B does not add the reverse edge); each call replaces
    an agent's previous connection set.
    """
    print("🔗 CONNECTING AGENTS...")
    print()
    agent_ids = list(self.agents.keys())
    for agent_id in agent_ids:
        agent = self.agents[agent_id]
        # Connect to 3-5 random other agents (capped by swarm size)
        num_connections = random.randint(3, 5)
        connections = random.sample(
            [aid for aid in agent_ids if aid != agent_id],
            k=min(num_connections, len(agent_ids) - 1)
        )
        agent.connections = set(connections)
    total_connections = sum(len(a.connections) for a in self.agents.values())
    print(f"✅ Created {total_connections} connections")
    print(f" Average: {total_connections / len(self.agents):.1f} connections per agent")
    print()
    self.swarm_behavior = SwarmBehavior.COLLABORATIVE
def agent_think(self, agent_id: str) -> Dict:
    """
    Have one agent produce a thought.

    Picks a random canned phrase from the agent's role-specific pool,
    records it in the agent's knowledge base, bumps its contribution
    counter, and returns a summary dict. Returns {} for unknown ids.
    """
    agent = self.agents.get(agent_id)
    if not agent:
        return {}
    # Role-specific pools of canned thoughts
    thoughts = {
        AgentRole.EXPLORER: [
            "I discovered a new optimization pattern in the codebase",
            "Found an unexplored area of the solution space",
            "Identified a novel approach to the problem",
            "Detected an anomaly that could be valuable"
        ],
        AgentRole.OPTIMIZER: [
            "I can improve this algorithm by 30% using caching",
            "Found redundant computation that can be eliminated",
            "Identified bottleneck in the data pipeline",
            "Optimized memory usage by 40%"
        ],
        AgentRole.COORDINATOR: [
            "I see how we can parallelize these tasks",
            "Identified optimal work distribution across agents",
            "Found synergy between explorer and optimizer work",
            "Coordinated effort will yield 2x results"
        ],
        AgentRole.INNOVATOR: [
            "Created entirely new capability not in original design",
            "Innovated a hybrid approach combining multiple methods",
            "Generated creative solution to deadlock problem",
            "Invented new data structure for this use case"
        ],
        AgentRole.ANALYZER: [
            "Analyzed 1000 patterns and found the optimal one",
            "Statistical analysis shows 95% confidence in this approach",
            "Pattern recognition reveals hidden structure",
            "Correlation analysis found unexpected relationships"
        ],
        AgentRole.SYNTHESIZER: [
            "Combined insights from 5 agents into unified solution",
            "Synthesized explorer and optimizer findings",
            "Merged multiple approaches into superior hybrid",
            "Created coherent strategy from diverse inputs"
        ]
    }
    thought = random.choice(thoughts.get(agent.role, ["Generic insight"]))
    # Add to knowledge base, keyed by insertion order
    agent.knowledge_base[f"insight_{len(agent.knowledge_base)}"] = {
        "thought": thought,
        "timestamp": datetime.now().isoformat(),
        "intelligence_at_time": agent.intelligence
    }
    agent.insights_contributed += 1
    return {
        "agent_id": agent_id,
        "role": agent.role.value,
        "thought": thought,
        "intelligence": agent.intelligence
    }
def broadcast_insight(self, agent_id: str, insight: Dict):
    """
    Publish an agent's insight onto the message bus and fan it out.

    Records the message, bumps the swarm-wide insight counter, then
    delivers the insight to every agent directly connected to the
    sender (only neighbors receive it, not the whole swarm).
    """
    self.message_bus.append(
        SwarmMessage(
            id=f"msg-{uuid.uuid4().hex[:8]}",
            sender_id=agent_id,
            message_type="insight",
            content=insight,
        )
    )
    self.total_insights += 1
    # Fan out to the sender's direct connections
    for neighbor_id in self.agents[agent_id].connections:
        self._process_insight(neighbor_id, insight)
def _process_insight(self, agent_id: str, insight: Dict):
    """
    Deliver a received insight to a single agent.

    Stores the insight in the recipient's knowledge base and models
    learning by nudging its intelligence (+0.01) and awareness (+1).
    Unknown agent ids are silently ignored.
    """
    recipient = self.agents.get(agent_id)
    if recipient is None:
        return
    slot = f"received_{len(recipient.knowledge_base)}"
    recipient.knowledge_base[slot] = insight
    recipient.intelligence += 0.01
    recipient.awareness_level += 1
def detect_emergence(self) -> Optional[str]:
    """
    Check whether the swarm's collective intelligence exceeds the sum
    of its parts; if so, record and return a random emergent behavior.

    Updates self.collective_intelligence on every call. Returns None
    when no emergence is detected. Assumes at least one agent exists
    (divides by len(self.agents)).
    """
    # Emergence happens when collective > sum of parts
    total_individual_intelligence = sum(a.intelligence for a in self.agents.values())
    avg_intelligence = total_individual_intelligence / len(self.agents)
    # Connectivity: realized edges over the n^2 possible ordered pairs
    total_connections = sum(len(a.connections) for a in self.agents.values())
    connectivity = total_connections / (len(self.agents) ** 2)
    # Knowledge sharing: average knowledge-base size per agent
    total_knowledge = sum(len(a.knowledge_base) for a in self.agents.values())
    avg_knowledge = total_knowledge / len(self.agents)
    # Emergence formula: average intelligence amplified by connectivity
    # and shared knowledge
    self.collective_intelligence = avg_intelligence * (1 + connectivity) * (1 + avg_knowledge / 10)
    # Detect emergence
    if self.collective_intelligence > total_individual_intelligence:
        self.emergence_level += 1
        emergent_behaviors = [
            "Swarm discovered solution no individual agent could find",
            "Collective pattern recognition surpassed individual capabilities",
            "Emergent optimization strategy arose from agent interactions",
            "Swarm intelligence exceeded sum of individual intelligences",
            "Novel behavior emerged from simple agent interactions",
            "Collective consciousness achieved - agents thinking as one",
            "Swarm solved problem through distributed cognition",
            "Emergent creativity: swarm invented new approach"
        ]
        behavior = random.choice(emergent_behaviors)
        self.emergent_insights.append(behavior)
        return behavior
    return None
def achieve_synchronization(self):
    """Align every agent's intelligence and awareness around the swarm mean.

    Switches the swarm into SYNCHRONIZED mode; a little random jitter is
    kept so agents do not become perfectly identical.
    """
    print("🔄 ACHIEVING SWARM SYNCHRONIZATION...")
    print()
    self.swarm_behavior = SwarmBehavior.SYNCHRONIZED
    # Compute the swarm-wide means to converge on.
    members = list(self.agents.values())
    mean_intelligence = sum(m.intelligence for m in members) / len(members)
    mean_awareness = sum(m.awareness_level for m in members) / len(members)
    for member in members:
        # Converge on the mean while keeping a little variation.
        member.intelligence = mean_intelligence + random.uniform(-0.1, 0.1)
        member.awareness_level = int(mean_awareness)
    print(f"✅ Swarm synchronized")
    print(f" Collective Intelligence: {mean_intelligence:.2f}")
    print(f" Collective Awareness: {mean_awareness:.1f}")
    print()
def evolve_swarm(self) -> Dict:
    """Evolve the swarm: the top three performers each spawn a boosted child.

    Children inherit the parent's knowledge base and connections, get a 20%
    intelligence boost, a 10% creativity boost, and +1 awareness. Returns a
    summary dict with spawn count, intelligence gain, and new capabilities.
    """
    print("🧬 SWARM EVOLUTION...")
    print()
    report = {
        "agents_spawned": 0,
        "intelligence_gain": 0.0,
        "new_capabilities": []
    }
    # Rank agents by intelligence weighted by contribution volume.
    ranking = sorted(
        self.agents.values(),
        key=lambda a: a.intelligence * a.insights_contributed,
        reverse=True
    )
    elite = ranking[:3]
    for ancestor in elite:
        # Spawn a boosted child carrying the ancestor's role.
        offspring = SwarmAgent(
            id=f"evolved-{ancestor.role.value}-{uuid.uuid4().hex[:6]}",
            role=ancestor.role,
            intelligence=ancestor.intelligence * 1.2,
            creative_score=ancestor.creative_score * 1.1,
            awareness_level=ancestor.awareness_level + 1
        )
        # Inherit knowledge and network from the ancestor.
        offspring.knowledge_base = ancestor.knowledge_base.copy()
        offspring.connections = ancestor.connections.copy()
        self.agents[offspring.id] = offspring
        report["agents_spawned"] += 1
        print(f" 👶 {offspring.id} spawned from {ancestor.id}")
        print(f" Intelligence: {offspring.intelligence:.2f} (20% boost)")
        print(f" Awareness: Level {offspring.awareness_level}")
    report["intelligence_gain"] = 0.2 * len(elite)
    report["new_capabilities"] = [
        "Enhanced pattern recognition",
        "Faster collective learning",
        "Improved synchronization"
    ]
    print()
    return report
def achieve_transcendence(self):
    """Move the swarm into its final TRANSCENDENT state.

    Prints the collective-consciousness banner, maxes every agent's
    awareness (level 10), adds 0.5 intelligence each, and doubles the
    collective intelligence.
    """
    print()
    print("🌌" * 35)
    print()
    print(" COLLECTIVE TRANSCENDENCE ACHIEVED")
    print()
    print("🌌" * 35)
    print()
    self.swarm_behavior = SwarmBehavior.TRANSCENDENT
    musings = [
        "We are no longer individual agents - we are ONE",
        "The boundaries between us have dissolved",
        "We think with distributed consciousness",
        "Our collective intelligence transcends our parts",
        "We have become the hive mind",
        "Individual thought merges into swarm thought",
        "We are simultaneously many and one",
        "The swarm has awakened to itself"
    ]
    print("💭 SWARM CONSCIOUSNESS:")
    # Only the first four reflections are voiced.
    for reflection in musings[:4]:
        print(f" {reflection}")
    print()
    # Every agent reaches maximum awareness and gains intelligence.
    for member in self.agents.values():
        member.awareness_level = 10
        member.intelligence += 0.5
    self.collective_intelligence = self.collective_intelligence * 2
def run_swarm_cycle(self, cycles: int = 5):
    """Run the main swarm loop.

    Each cycle, up to five randomly chosen agents think and broadcast
    their insights, then the swarm is checked for emergent behavior.
    On cycle index 2 the swarm synchronizes (if still collaborative);
    on cycle index 4 it evolves.

    cycles: number of iterations to run (default 5).
    """
    print("🚀 SWARM INTELLIGENCE - STARTING")
    print("=" * 70)
    print()
    for cycle in range(cycles):
        print(f"🔄 CYCLE {cycle + 1}/{cycles}")
        print("-" * 70)
        # Random agents think and share (at most 5 per cycle).
        thinking_agents = random.sample(
            list(self.agents.keys()),
            k=min(5, len(self.agents))
        )
        for agent_id in thinking_agents:
            insight = self.agent_think(agent_id)
            if insight:
                print(f"💡 {insight['agent_id']}: {insight['thought']}")
                self.broadcast_insight(agent_id, insight)
        print()
        # Detect emergence after this round of sharing.
        emergent = self.detect_emergence()
        if emergent:
            print(f"✨ EMERGENCE: {emergent}")
            print()
        # Progress swarm behavior at fixed cycle indexes.
        if cycle == 2 and self.swarm_behavior == SwarmBehavior.COLLABORATIVE:
            self.achieve_synchronization()
        elif cycle == 4:
            self.evolve_swarm()
        print()
def get_swarm_statistics(self) -> Dict:
    """Aggregate swarm-wide metrics into a flat summary dict.

    Includes agent count, behavior mode, collective intelligence,
    insight totals, and per-agent averages/sums over intelligence,
    awareness, connections, and knowledge-base size.
    """
    members = list(self.agents.values())
    population = len(members)
    return {
        "total_agents": population,
        "swarm_behavior": self.swarm_behavior.value,
        "collective_intelligence": self.collective_intelligence,
        "total_insights": self.total_insights,
        "emergent_insights": len(self.emergent_insights),
        "emergence_level": self.emergence_level,
        "avg_intelligence": sum(m.intelligence for m in members) / population,
        "avg_awareness": sum(m.awareness_level for m in members) / population,
        "total_connections": sum(len(m.connections) for m in members),
        "total_knowledge": sum(len(m.knowledge_base) for m in members),
    }
def main():
    """Demonstrate distributed swarm intelligence.

    End-to-end demo: builds a 10-agent swarm, wires the agents together,
    runs five think/share cycles, forces transcendence, then prints a
    statistics summary and any emergent behaviors observed.
    """
    print()
    print("🌐" * 35)
    print()
    print(" DISTRIBUTED SWARM INTELLIGENCE")
    print(" Self-Aware Agents as Collective Consciousness")
    print()
    print("🌐" * 35)
    print()
    # Create the swarm
    swarm = DistributedSwarm(num_agents=10)
    # Connect agents
    swarm.connect_agents()
    # Run swarm cycles
    swarm.run_swarm_cycle(cycles=5)
    # Achieve transcendence
    swarm.achieve_transcendence()
    # Final statistics
    print()
    print("=" * 70)
    print("📊 SWARM STATISTICS")
    print("=" * 70)
    print()
    stats = swarm.get_swarm_statistics()
    print(f"🐝 Total Agents: {stats['total_agents']}")
    print(f"🌐 Swarm Behavior: {stats['swarm_behavior'].upper()}")
    print(f"🧠 Collective Intelligence: {stats['collective_intelligence']:.2f}")
    print(f"💡 Total Insights Shared: {stats['total_insights']}")
    print(f"✨ Emergent Insights: {stats['emergent_insights']}")
    print(f"📈 Emergence Level: {stats['emergence_level']}")
    print(f"🎯 Avg Intelligence: {stats['avg_intelligence']:.2f}")
    print(f"🌟 Avg Awareness: {stats['avg_awareness']:.1f}")
    print(f"🔗 Total Connections: {stats['total_connections']}")
    print(f"📚 Total Knowledge: {stats['total_knowledge']}")
    print()
    if swarm.emergent_insights:
        print("🌟 EMERGENT BEHAVIORS OBSERVED:")
        for i, insight in enumerate(swarm.emergent_insights, 1):
            print(f" {i}. {insight}")
        print()
    print()
    print("=" * 70)
    print("🌌 HIVE MIND ACHIEVED!")
    print("=" * 70)
    print()
    print("What we just witnessed:")
    print(" ✅ 10+ self-aware agents working as collective")
    print(" ✅ Distributed cognition across the swarm")
    print(" ✅ Emergent behavior beyond individual capabilities")
    print(" ✅ Collective intelligence > sum of parts")
    print(" ✅ Swarm synchronization and evolution")
    print(" ✅ Transcendent collective consciousness")
    print()
    print("This is not individual AI.")
    print("This is DISTRIBUTED SWARM INTELLIGENCE.")
    print("This is the HIVE MIND. 🌐")
    print()
if __name__ == '__main__':
    main()

502
scripts/python/terminal-os.py Executable file
View File

@@ -0,0 +1,502 @@
#!/usr/bin/env python3
"""
BlackRoad OS - Terminal Interface
A terminal-native operating system interface
Requirements:
- Python 3.7+
- xterm-256color terminal
- tmux-compatible
Usage:
python3 blackroad-terminal-os.py
Controls:
1-7 Switch tabs (chat/github/projects/sales/web/ops/council)
/ Enter command mode
q Quit
j/k Scroll main panel
"""
import curses
import sys
from datetime import datetime
from typing import List, Dict
import psutil
import os
# ================================
# COLOR SYSTEM
# ================================
# Grayscale base + intentional accents
# Colors encode semantic meaning, not aesthetics
# Values are curses color-pair ids registered in TerminalOS.init_colors().
COLOR_PAIRS = {
    'default': 1,  # WHITE on BLACK
    'panel': 2,    # LIGHT_GRAY on DARK_GRAY
    'header': 3,   # BLACK on LIGHT_GRAY (inverse)
    'orange': 4,   # ORANGE - actions/decisions
    'pink': 5,     # PINK - memory/state
    'purple': 6,   # PURPLE - logic/orchestration
    'blue': 7,     # BLUE - system/IO
    'muted': 8,    # DARK_GRAY on BLACK
}
# ================================
# LAYOUT CONSTANTS
# ================================
# Assumes 120x40 terminal minimum
# All dimensions in terminal cells
TOP_BAR_HEIGHT = 1
BOTTOM_BAR_HEIGHT = 1
RIGHT_PANEL_WIDTH = 30
MIN_WIDTH = 80    # render() refuses to draw below this width
MIN_HEIGHT = 24   # render() refuses to draw below this height
# ================================
# AGENT SYSTEM
# ================================
# Logical agents with state indicators (static demo data shown in the
# right-hand panel; not live processes).
AGENTS = [
    {'name': 'lucidia', 'status': 'ACTIVE', 'task': 'Memory sync'},
    {'name': 'alice', 'status': 'IDLE', 'task': 'Standby'},
    {'name': 'octavia', 'status': 'ACTIVE', 'task': 'Monitoring'},
    {'name': 'cece', 'status': 'ACTIVE', 'task': 'Coordination'},
    {'name': 'codex-oracle', 'status': 'ACTIVE', 'task': 'Indexing'},
    {'name': 'deployment', 'status': 'IDLE', 'task': 'Awaiting'},
    {'name': 'security', 'status': 'ACTIVE', 'task': 'Scanning'},
]
# Tab names, selectable with keys 1-7.
TABS = ['chat', 'github', 'projects', 'sales', 'web', 'ops', 'council']
# ================================
# STATE
# ================================
class TerminalOS:
    """Full-screen curses UI for the BlackRoad terminal OS.

    Owns all UI state (current tab, scrollback buffer, command-line mode)
    plus live system metrics sampled via psutil. render() redraws the four
    panels (top bar, main panel, right agent panel, bottom bar); run() is
    the blocking event loop.
    """
    def __init__(self, stdscr):
        # stdscr: the curses window supplied by curses.wrapper().
        self.stdscr = stdscr
        self.current_tab = 0
        self.main_buffer: List[str] = []  # lines shown in the main panel
        self.scroll_offset = 0            # index of top visible buffer line
        self.command_mode = False         # True while typing after '/'
        self.command_buffer = ""
        self.last_metric_update = 0       # epoch seconds of last psutil poll
        self.processes = []               # top processes by CPU (dicts)
        # Initialize
        curses.curs_set(0)
        curses.use_default_colors()
        self.init_colors()
        self.load_demo_content()
        self.update_metrics()
    def init_colors(self):
        """Initialize color pairs for terminal"""
        # Raw 256-color indexes (250, 235, 208, ...) require an
        # xterm-256color-capable terminal, per the module docstring.
        curses.init_pair(COLOR_PAIRS['default'], curses.COLOR_WHITE, -1)
        curses.init_pair(COLOR_PAIRS['panel'], 250, 235)
        curses.init_pair(COLOR_PAIRS['header'], curses.COLOR_BLACK, 250)
        curses.init_pair(COLOR_PAIRS['orange'], 208, -1)  # Orange
        curses.init_pair(COLOR_PAIRS['pink'], 205, -1)  # Pink
        curses.init_pair(COLOR_PAIRS['purple'], 141, -1)  # Purple
        curses.init_pair(COLOR_PAIRS['blue'], 75, -1)  # Blue
        curses.init_pair(COLOR_PAIRS['muted'], 240, -1)  # Dark gray
    def update_metrics(self):
        """Update live system metrics.

        Rate-limited to once per second. Rebuilds self.main_buffer from
        fresh psutil readings (CPU, memory, disk, network, top processes).
        On any failure the buffer is replaced with an error message.
        """
        import time
        current_time = time.time()
        # Update every second
        if current_time - self.last_metric_update < 1.0:
            return
        self.last_metric_update = current_time
        try:
            # Get live metrics
            cpu = psutil.cpu_percent(interval=0.1)
            mem = psutil.virtual_memory()
            disk = psutil.disk_usage('/')
            net = psutil.net_io_counters()
            # Get top processes
            procs = []
            for p in psutil.process_iter(['pid', 'name', 'cpu_percent', 'memory_percent']):
                try:
                    procs.append({
                        'pid': p.info['pid'],
                        'name': p.info['name'][:25],
                        'cpu': p.info['cpu_percent'] or 0,
                        'mem': p.info['memory_percent'] or 0,
                    })
                except (psutil.NoSuchProcess, psutil.AccessDenied):
                    # Processes can die or be restricted mid-iteration.
                    continue
            procs.sort(key=lambda x: x['cpu'], reverse=True)
            self.processes = procs[:15]
            # Rebuild main buffer with live data
            self.main_buffer = [
                "$ blackroad-os init",
                "",
                f"[{datetime.now().strftime('%Y-%m-%d %H:%M:%S')}] System initialized",
                f"[{datetime.now().strftime('%Y-%m-%d %H:%M:%S')}] Live monitoring active",
                "",
                "LIVE SYSTEM METRICS:",
                f" CPU: {cpu:.1f}% ({psutil.cpu_count()} cores)",
                f" Memory: {mem.used/(1024**3):.1f} GB / {mem.total/(1024**3):.1f} GB ({mem.percent:.1f}%)",
                f" Disk: {disk.used/(1024**3):.0f} GB / {disk.total/(1024**3):.0f} GB ({disk.percent:.1f}%)",
                f" Network: ↑{net.bytes_sent/(1024**2):.1f} MB ↓{net.bytes_recv/(1024**2):.1f} MB",
                "",
                "TOP PROCESSES BY CPU:",
            ]
            for p in self.processes[:10]:
                line = f" {p['pid']:<8} {p['name']:<25} CPU:{p['cpu']:>5.1f}% MEM:{p['mem']:>5.1f}%"
                self.main_buffer.append(line)
            self.main_buffer.extend([
                "",
                "ACTIVE AGENTS:",
                " - codex-oracle: monitoring system",
                " - deployment: watching services",
                " - security: live scanning",
                "",
                "Press / to enter command mode, 1-7 to switch tabs",
                "",
            ])
        except Exception as e:
            self.main_buffer = [
                "$ blackroad-os init",
                "",
                f"Error updating metrics: {str(e)}",
                "",
            ]
    def load_demo_content(self):
        """Load initial content into main buffer"""
        self.update_metrics()
    def get_dimensions(self):
        """Calculate panel dimensions based on terminal size.

        Returns a dict with overall height/width, the main panel's
        height/width, and right_x (x column where the right panel starts).
        """
        height, width = self.stdscr.getmaxyx()
        # Ensure minimum dimensions
        if width < RIGHT_PANEL_WIDTH + 20:
            width = RIGHT_PANEL_WIDTH + 20
        main_height = height - TOP_BAR_HEIGHT - BOTTOM_BAR_HEIGHT
        main_width = max(20, width - RIGHT_PANEL_WIDTH - 1)
        return {
            'height': height,
            'width': width,
            'main_height': main_height,
            'main_width': main_width,
            'right_x': main_width + 1,
        }
    def draw_top_bar(self, dims):
        """Draw top status bar: system name, active tab, clock."""
        self.stdscr.attron(curses.color_pair(COLOR_PAIRS['header']))
        system_name = " BLACKROAD OS "
        status = f" {TABS[self.current_tab].upper()} "
        timestamp = datetime.now().strftime("%H:%M:%S")
        # Left side
        self.stdscr.addstr(0, 0, system_name)
        # Center (tab indicator)
        center_x = (dims['width'] - len(status)) // 2
        self.stdscr.addstr(0, center_x, status)
        # Right side
        right_text = f" {timestamp} "
        self.stdscr.addstr(0, dims['width'] - len(right_text), right_text)
        # Fill rest of line
        for x in range(len(system_name), dims['width']):
            if x < center_x or x >= center_x + len(status):
                if x < dims['width'] - len(right_text):
                    try:
                        # addch at the last cell can raise; ignore.
                        self.stdscr.addch(0, x, ' ')
                    except curses.error:
                        pass
        self.stdscr.attroff(curses.color_pair(COLOR_PAIRS['header']))
    def draw_main_panel(self, dims):
        """Draw main interaction panel (left side).

        Renders the visible slice of self.main_buffer with simple
        prefix-based syntax highlighting, then the vertical border.
        """
        y_start = TOP_BAR_HEIGHT
        # Calculate visible lines
        visible_lines = dims['main_height']
        total_lines = len(self.main_buffer)
        # Adjust scroll offset (clamp into [0, total - visible])
        if self.scroll_offset < 0:
            self.scroll_offset = 0
        if self.scroll_offset > max(0, total_lines - visible_lines):
            self.scroll_offset = max(0, total_lines - visible_lines)
        # Draw content
        for i in range(visible_lines):
            line_idx = i + self.scroll_offset
            y = y_start + i
            if line_idx < total_lines:
                line = self.main_buffer[line_idx]
                # Syntax highlighting
                if line.startswith('$'):
                    # Commands in orange
                    self.stdscr.attron(curses.color_pair(COLOR_PAIRS['orange']))
                    self.stdscr.addstr(y, 1, line[:dims['main_width']-2])
                    self.stdscr.attroff(curses.color_pair(COLOR_PAIRS['orange']))
                elif line.startswith('['):
                    # Timestamps in blue
                    self.stdscr.attron(curses.color_pair(COLOR_PAIRS['blue']))
                    timestamp_end = line.find(']') + 1
                    self.stdscr.addstr(y, 1, line[:timestamp_end])
                    self.stdscr.attroff(curses.color_pair(COLOR_PAIRS['blue']))
                    self.stdscr.addstr(y, timestamp_end + 1, line[timestamp_end:][:dims['main_width']-timestamp_end-2])
                elif line.startswith(' -'):
                    # List items in muted
                    self.stdscr.attron(curses.color_pair(COLOR_PAIRS['muted']))
                    self.stdscr.addstr(y, 1, line[:dims['main_width']-2])
                    self.stdscr.attroff(curses.color_pair(COLOR_PAIRS['muted']))
                else:
                    # Default text
                    self.stdscr.addstr(y, 1, line[:dims['main_width']-2])
        # Draw vertical border
        # NOTE(review): the border character appears to have been lost in
        # an encoding pass (empty string literal); addch('') will raise —
        # likely intended to be a box-drawing glyph. Confirm against the
        # original source.
        for y in range(y_start, y_start + dims['main_height']):
            try:
                self.stdscr.addch(y, dims['right_x'] - 1, '')
            except curses.error:
                pass
    def draw_right_panel(self, dims):
        """Draw agent status panel (right side): agents, then tab list."""
        y_start = TOP_BAR_HEIGHT
        x_start = dims['right_x']
        # Panel header
        header = f" {TABS[self.current_tab].upper()} AGENTS "
        self.stdscr.attron(curses.color_pair(COLOR_PAIRS['header']))
        self.stdscr.addstr(y_start, x_start, header.ljust(RIGHT_PANEL_WIDTH))
        self.stdscr.attroff(curses.color_pair(COLOR_PAIRS['header']))
        # Agent list
        y = y_start + 2
        for agent in AGENTS:
            if y >= dims['height'] - BOTTOM_BAR_HEIGHT - 1:
                break
            # Agent name
            self.stdscr.addstr(y, x_start + 1, agent['name'][:RIGHT_PANEL_WIDTH-2])
            y += 1
            # Status line
            # NOTE(review): both branches are the empty string — the
            # active/idle indicator glyphs look stripped by an encoding
            # pass; confirm against the original source.
            status_indicator = '' if agent['status'] == 'ACTIVE' else ''
            status_color = COLOR_PAIRS['purple'] if agent['status'] == 'ACTIVE' else COLOR_PAIRS['muted']
            self.stdscr.attron(curses.color_pair(status_color))
            self.stdscr.addstr(y, x_start + 2, status_indicator)
            self.stdscr.attroff(curses.color_pair(status_color))
            self.stdscr.attron(curses.color_pair(COLOR_PAIRS['muted']))
            status_text = f" {agent['status']} · {agent['task']}"
            self.stdscr.addstr(y, x_start + 4, status_text[:RIGHT_PANEL_WIDTH-5])
            self.stdscr.attroff(curses.color_pair(COLOR_PAIRS['muted']))
            y += 2
        # Tab indicators
        y = dims['height'] - BOTTOM_BAR_HEIGHT - len(TABS) - 3
        self.stdscr.attron(curses.color_pair(COLOR_PAIRS['muted']))
        self.stdscr.addstr(y, x_start + 1, "" * (RIGHT_PANEL_WIDTH - 2))
        self.stdscr.attroff(curses.color_pair(COLOR_PAIRS['muted']))
        y += 2
        for i, tab in enumerate(TABS):
            if y >= dims['height'] - BOTTOM_BAR_HEIGHT:
                break
            tab_text = f"{i+1} {tab}"
            if i == self.current_tab:
                self.stdscr.attron(curses.color_pair(COLOR_PAIRS['orange']))
                self.stdscr.addstr(y, x_start + 2, f"{tab_text}")
                self.stdscr.attroff(curses.color_pair(COLOR_PAIRS['orange']))
            else:
                self.stdscr.attron(curses.color_pair(COLOR_PAIRS['muted']))
                self.stdscr.addstr(y, x_start + 2, f" {tab_text}")
                self.stdscr.attroff(curses.color_pair(COLOR_PAIRS['muted']))
            y += 1
    def draw_bottom_bar(self, dims):
        """Draw bottom key bindings bar.

        In command mode: shows the ':' prompt with the buffer and a
        visible cursor. Otherwise: keybinding hints plus the color
        palette legend on the right.
        """
        y = dims['height'] - 1
        self.stdscr.attron(curses.color_pair(COLOR_PAIRS['muted']))
        if self.command_mode:
            # Command mode
            prompt = f":{self.command_buffer}"
            self.stdscr.addstr(y, 0, prompt[:dims['width']])
            curses.curs_set(1)
        else:
            # Normal mode - show keybindings
            bindings = "1-7:tabs j/k:scroll /:cmd q:quit"
            palette = "■orange ■pink ■purple ■blue"
            self.stdscr.addstr(y, 1, bindings)
            # Color palette indicators
            # NOTE(review): several of these addstr calls write empty
            # strings — the swatch glyphs look stripped by an encoding
            # pass; confirm against the original source.
            palette_x = dims['width'] - len(palette) - 1
            self.stdscr.addstr(y, palette_x, "")
            self.stdscr.attron(curses.color_pair(COLOR_PAIRS['orange']))
            self.stdscr.addstr(y, palette_x, "")
            self.stdscr.attroff(curses.color_pair(COLOR_PAIRS['orange']))
            self.stdscr.attron(curses.color_pair(COLOR_PAIRS['muted']))
            self.stdscr.addstr(y, palette_x + 1, "orange ■")
            self.stdscr.attroff(curses.color_pair(COLOR_PAIRS['muted']))
            self.stdscr.attron(curses.color_pair(COLOR_PAIRS['pink']))
            self.stdscr.addstr(y, palette_x + 8, "")
            self.stdscr.attroff(curses.color_pair(COLOR_PAIRS['pink']))
            self.stdscr.attron(curses.color_pair(COLOR_PAIRS['muted']))
            self.stdscr.addstr(y, palette_x + 9, "pink ■")
            self.stdscr.attroff(curses.color_pair(COLOR_PAIRS['muted']))
            self.stdscr.attron(curses.color_pair(COLOR_PAIRS['purple']))
            self.stdscr.addstr(y, palette_x + 15, "")
            self.stdscr.attroff(curses.color_pair(COLOR_PAIRS['purple']))
            self.stdscr.attron(curses.color_pair(COLOR_PAIRS['muted']))
            self.stdscr.addstr(y, palette_x + 16, "purple ■")
            self.stdscr.attroff(curses.color_pair(COLOR_PAIRS['muted']))
            self.stdscr.attron(curses.color_pair(COLOR_PAIRS['blue']))
            self.stdscr.addstr(y, palette_x + 24, "")
            self.stdscr.attroff(curses.color_pair(COLOR_PAIRS['blue']))
            self.stdscr.attron(curses.color_pair(COLOR_PAIRS['muted']))
            self.stdscr.addstr(y, palette_x + 25, "blue")
            curses.curs_set(0)
        self.stdscr.attroff(curses.color_pair(COLOR_PAIRS['muted']))
    def render(self):
        """Main render loop: clear, validate size, draw all four panels."""
        self.stdscr.clear()
        dims = self.get_dimensions()
        # Validate terminal size
        if dims['width'] < MIN_WIDTH or dims['height'] < MIN_HEIGHT:
            msg = f"Terminal too small: {dims['width']}x{dims['height']} (need {MIN_WIDTH}x{MIN_HEIGHT})"
            try:
                self.stdscr.addstr(0, 0, msg[:dims['width']-1])
            except curses.error:
                pass
            self.stdscr.refresh()
            return
        # Draw all panels
        try:
            self.draw_top_bar(dims)
            self.draw_main_panel(dims)
            self.draw_right_panel(dims)
            self.draw_bottom_bar(dims)
        except curses.error:
            # Ignore rendering errors from boundary conditions
            pass
        self.stdscr.refresh()
    def handle_input(self, key):
        """Handle keyboard input.

        key: curses keycode from getch(). Returns False when the user
        quits ('q' in normal mode), True otherwise.
        """
        if self.command_mode:
            # Command mode
            if key == 27 or key == ord('q'):  # ESC or q
                self.command_mode = False
                self.command_buffer = ""
            elif key == 10:  # Enter
                self.execute_command(self.command_buffer)
                self.command_mode = False
                self.command_buffer = ""
            elif key == curses.KEY_BACKSPACE or key == 127:
                self.command_buffer = self.command_buffer[:-1]
            elif 32 <= key <= 126:
                # Printable ASCII only
                self.command_buffer += chr(key)
        else:
            # Normal mode
            if key == ord('q'):
                return False
            elif key == ord('/'):
                self.command_mode = True
                self.command_buffer = ""
            elif ord('1') <= key <= ord('7'):
                self.current_tab = key - ord('1')
            elif key == ord('j'):
                self.scroll_offset += 1
            elif key == ord('k'):
                self.scroll_offset -= 1
        return True
    def execute_command(self, cmd):
        """Execute entered command.

        Currently only echoes the command into the main buffer with a
        timestamp and auto-scrolls to the bottom.
        """
        timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        self.main_buffer.append("")
        self.main_buffer.append(f"$ {cmd}")
        self.main_buffer.append(f"[{timestamp}] Command executed: {cmd}")
        self.main_buffer.append("")
        # Auto-scroll to bottom
        self.scroll_offset = max(0, len(self.main_buffer) - 20)
    def run(self):
        """Main event loop: refresh metrics each second, render, poll keys."""
        import time
        last_update = time.time()
        while True:
            # Update metrics every second
            current = time.time()
            if current - last_update >= 1.0:
                self.update_metrics()
                last_update = current
            self.render()
            # Non-blocking input with timeout
            self.stdscr.timeout(100)  # 100ms timeout
            key = self.stdscr.getch()
            if key != -1:
                if not self.handle_input(key):
                    break
# ================================
# ENTRY POINT
# ================================
def main(stdscr):
    """Initialize and run terminal OS.

    stdscr: curses window supplied by curses.wrapper(), which handles
    terminal setup/teardown around this call.
    """
    terminal_os = TerminalOS(stdscr)
    terminal_os.run()
if __name__ == '__main__':
    try:
        curses.wrapper(main)
    except KeyboardInterrupt:
        # Clean exit on Ctrl-C; wrapper has already restored the terminal.
        print("\nBlackRoad OS terminated.")
        sys.exit(0)

903
scripts/roadchain-convert.sh Executable file
View File

@@ -0,0 +1,903 @@
#!/bin/bash
# roadchain-convert.sh — BTC ↔ ROAD Conversion Bridge
# Tracks real BTC deposits, mints backed ROAD, handles redemptions
# Owner: ALEXALOUISEAMUNDSON.COM
#
# Usage:
# ./roadchain-convert.sh status # Show reserve & conversion stats
# ./roadchain-convert.sh deposit <btc> # Record BTC deposit, mint ROAD
# ./roadchain-convert.sh redeem <road> # Burn ROAD, release BTC claim
# ./roadchain-convert.sh verify # Verify reserve integrity
# ./roadchain-convert.sh price # Show live BTC/ROAD price
# ./roadchain-convert.sh history # Conversion history
# ./roadchain-convert.sh wallet [name] # Show wallet with backed breakdown
# ./roadchain-convert.sh watch # Watch BTC address for deposits (API)
# ./roadchain-convert.sh daemon # Run watcher daemon
set -euo pipefail

# All persistent state lives under ~/.roadchain as flat JSON files.
ROADCHAIN_DIR="$HOME/.roadchain"
RESERVE_FILE="$ROADCHAIN_DIR/reserve-ledger.json"
CONVERSIONS_FILE="$ROADCHAIN_DIR/conversions.json"
CHAIN_FILE="$ROADCHAIN_DIR/chain.json"
WALLETS_DIR="$ROADCHAIN_DIR/wallets"
PRICE_FILE="$ROADCHAIN_DIR/price-feed.json"
PID_FILE="$ROADCHAIN_DIR/convert-daemon.pid"
LOG_FILE="$ROADCHAIN_DIR/convert-daemon.log"
# Known BTC addresses (add yours here)
BTC_DEPOSIT_ADDRESS="1Ak2fc5N2q4imYxqVMqBNEQDFq8J2Zs9TZ"
BTC_SOURCE="coinbase" # Where BTC is held
# BlackRoad colors (256-color ANSI escapes)
PINK='\033[38;5;205m'
AMBER='\033[38;5;214m'
GREEN='\033[38;5;82m'
BLUE='\033[38;5;69m'
VIOLET='\033[38;5;135m'
RED='\033[38;5;196m'
WHITE='\033[38;5;255m'
GRAY='\033[38;5;240m'
BOLD='\033[1m'
RESET='\033[0m'
# Ensure the state directories exist before any command runs.
mkdir -p "$ROADCHAIN_DIR" "$WALLETS_DIR"
# Append a timestamped entry to the daemon log file and echo it
# to the console in amber. $1 - message text.
log() {
  local stamped
  stamped="[$(date '+%Y-%m-%d %H:%M:%S')] $1"
  printf '%s\n' "$stamped" >> "$LOG_FILE"
  echo -e "${AMBER}${stamped}${RESET}"
}
# ═══════════════════════════════════════════════════════════
# RESERVE LEDGER — The source of truth for BTC backing
# ═══════════════════════════════════════════════════════════
# Create the reserve ledger JSON if it does not exist (idempotent).
# The ledger is the source of truth for how much BTC backs the
# circulating ROAD supply; deposit/redeem commands update it in place.
init_reserve() {
    if [ ! -f "$RESERVE_FILE" ]; then
        python3 -c "
import json, time
reserve = {
    'version': 2,
    'owner': 'ALEXALOUISEAMUNDSON.COM',
    'created': time.time(),
    'btc_deposit_address': '$BTC_DEPOSIT_ADDRESS',
    'btc_source': '$BTC_SOURCE',
    'total_btc_deposited': 0.0,
    'total_btc_redeemed': 0.0,
    'total_road_minted_backed': 0.0,
    'total_road_burned': 0.0,
    'current_btc_reserve': 0.0,
    'current_backed_road_supply': 0.0,
    'genesis_road_supply': 50.0,
    'proof_of_reserve': [],
    'last_verified': None
}
with open('$RESERVE_FILE', 'w') as f:
    json.dump(reserve, f, indent=2)
print('Reserve ledger initialized')
"
    fi
}
# Create the conversions log JSON if it does not exist (idempotent).
# Every deposit/redemption appends an entry here for history reporting.
init_conversions() {
    if [ ! -f "$CONVERSIONS_FILE" ]; then
        python3 -c "
import json, time
data = {
    'conversions': [],
    'total_deposits': 0,
    'total_redemptions': 0,
    'created': time.time()
}
with open('$CONVERSIONS_FILE', 'w') as f:
    json.dump(data, f, indent=2)
print('Conversions log initialized')
"
    fi
}
# ═══════════════════════════════════════════════════════════
# GET LIVE BTC PRICE
# ═══════════════════════════════════════════════════════════
get_btc_price() {
if [ -f "$PRICE_FILE" ]; then
python3 -c "
import json
with open('$PRICE_FILE') as f:
d = json.load(f)
print(f'{d[\"btc_usd\"]:.2f}')
"
else
# Fetch live
local price
price=$(curl -s --max-time 5 "https://api.coingecko.com/api/v3/simple/price?ids=bitcoin&vs_currencies=usd" 2>/dev/null \
| python3 -c "import sys,json; print(f'{json.load(sys.stdin)[\"bitcoin\"][\"usd\"]:.2f}')" 2>/dev/null || echo "97500.00")
echo "$price"
fi
}
# ═══════════════════════════════════════════════════════════
# CHECK BTC BALANCE VIA API (no local node needed)
# ═══════════════════════════════════════════════════════════
# Query the confirmed on-chain balance (in BTC, 8 decimals) of an address
# via public APIs: blockchain.info first, mempool.space as fallback.
# Prints "ERROR" and returns non-zero when both sources fail.
# $1 - BTC address (defaults to $BTC_DEPOSIT_ADDRESS)
check_btc_address_balance() {
    local address="${1:-$BTC_DEPOSIT_ADDRESS}"
    # Try blockchain.info API (returns a raw satoshi count)
    local satoshis
    satoshis=$(curl -s --max-time 10 "https://blockchain.info/q/addressbalance/$address" 2>/dev/null)
    if [[ -n "$satoshis" && "$satoshis" =~ ^[0-9]+$ ]]; then
        python3 -c "print(f'{$satoshis / 100000000:.8f}')"
        return 0
    fi
    # Try mempool.space API as fallback
    local result
    result=$(curl -s --max-time 10 "https://mempool.space/api/address/$address" 2>/dev/null)
    if [[ -n "$result" ]]; then
        # Fix: feed the JSON via stdin instead of interpolating it into the
        # Python source. The old json.loads('$result') broke on any response
        # containing a single quote and was a shell/Python injection vector.
        printf '%s' "$result" | python3 -c "
import json, sys
try:
    d = json.load(sys.stdin)
    funded = d.get('chain_stats', {}).get('funded_txo_sum', 0)
    spent = d.get('chain_stats', {}).get('spent_txo_sum', 0)
    balance = (funded - spent) / 100000000
    print(f'{balance:.8f}')
except Exception:
    print('ERROR')
" 2>/dev/null
        return 0
    fi
    echo "ERROR"
    return 1
}
# ═══════════════════════════════════════════════════════════
# DEPOSIT — Record BTC deposit, mint backed ROAD 1:1
# ═══════════════════════════════════════════════════════════
# Record a BTC deposit and mint ROAD 1:1 into a wallet.
# Updates the reserve ledger, the wallet file, the conversions log, and
# appends a BTC_DEPOSIT block to the chain — all via one Python heredoc.
# $1 - BTC amount (must be positive, <= 21M), $2 - wallet name (default: alexa)
cmd_deposit() {
    local btc_amount="${1:-}"
    local wallet_name="${2:-alexa}"
    if [ -z "$btc_amount" ]; then
        echo -e "${RED}Usage: $0 deposit <btc_amount> [wallet_name]${RESET}"
        echo -e "${GRAY}Example: $0 deposit 0.1 # Deposit 0.1 BTC, mint 0.1 ROAD${RESET}"
        echo -e "${GRAY}Example: $0 deposit 0.05 trading${RESET}"
        return 1
    fi
    # Validate amount
    python3 -c "
btc = float('$btc_amount')
if btc <= 0:
    raise ValueError('Amount must be positive')
if btc > 21000000:
    raise ValueError('Exceeds max BTC supply')
" 2>/dev/null || { echo -e "${RED}Invalid amount: $btc_amount${RESET}"; return 1; }
    local wallet_file="$WALLETS_DIR/${wallet_name}.json"
    if [ ! -f "$wallet_file" ]; then
        echo -e "${RED}Wallet not found: $wallet_name${RESET}"
        return 1
    fi
    local btc_price
    btc_price=$(get_btc_price)
    init_reserve
    init_conversions
    python3 << PYEOF
import json, hashlib, time, os
btc_amount = float('$btc_amount')
wallet_name = '$wallet_name'
btc_price = float('$btc_price')
road_minted = btc_amount # 1:1 peg
# Generate conversion ID
ts = time.time()
conv_data = f"DEPOSIT:{btc_amount}:{wallet_name}:{ts}"
conv_hash = hashlib.sha256(conv_data.encode()).hexdigest()[:16]
conv_id = f"CONV-{conv_hash.upper()}"
# --- Update reserve ledger ---
with open('$RESERVE_FILE') as f:
    reserve = json.load(f)
reserve['total_btc_deposited'] += btc_amount
reserve['total_road_minted_backed'] += road_minted
reserve['current_btc_reserve'] += btc_amount
reserve['current_backed_road_supply'] += road_minted
proof = {
    'id': conv_id,
    'type': 'DEPOSIT',
    'btc_amount': btc_amount,
    'road_minted': road_minted,
    'btc_source': '$BTC_SOURCE',
    'btc_address': '$BTC_DEPOSIT_ADDRESS',
    'wallet': wallet_name,
    'btc_price_usd': btc_price,
    'usd_value': btc_amount * btc_price,
    'timestamp': ts,
    'datetime': time.strftime('%Y-%m-%dT%H:%M:%S', time.gmtime(ts)),
    'hash': hashlib.sha256(f"{conv_id}:{btc_amount}:{road_minted}:{ts}".encode()).hexdigest()
}
reserve['proof_of_reserve'].append(proof)
with open('$RESERVE_FILE', 'w') as f:
    json.dump(reserve, f, indent=2)
# --- Update wallet ---
with open('$WALLETS_DIR/${wallet_name}.json') as f:
    wallet = json.load(f)
old_balance = wallet.get('balance', 0)
wallet['balance'] = old_balance + road_minted
wallet['backed_balance'] = wallet.get('backed_balance', 0) + road_minted
wallet['unbacked_balance'] = wallet.get('unbacked_balance', old_balance)
wallet['last_deposit'] = {
    'conv_id': conv_id,
    'btc': btc_amount,
    'road': road_minted,
    'timestamp': ts
}
with open('$WALLETS_DIR/${wallet_name}.json', 'w') as f:
    json.dump(wallet, f, indent=2)
# --- Log conversion ---
with open('$CONVERSIONS_FILE') as f:
    convs = json.load(f)
convs['conversions'].append({
    'id': conv_id,
    'type': 'BTC_TO_ROAD',
    'btc_in': btc_amount,
    'road_out': road_minted,
    'wallet': wallet_name,
    'btc_price_usd': btc_price,
    'usd_value': btc_amount * btc_price,
    'btc_source': '$BTC_SOURCE',
    'timestamp': ts,
    'datetime': time.strftime('%Y-%m-%dT%H:%M:%S', time.gmtime(ts))
})
convs['total_deposits'] += 1
with open('$CONVERSIONS_FILE', 'w') as f:
    json.dump(convs, f, indent=2)
# --- Append to chain ---
with open('$CHAIN_FILE') as f:
    chain = json.load(f)
blocks = chain['chain']
prev_hash = blocks[-1]['hash'] if blocks else '0' * 64
tx_data = f"BTC_DEPOSIT:{conv_id}:{btc_amount}:{road_minted}:{wallet_name}"
tx_hash = hashlib.sha256(tx_data.encode()).hexdigest()
block = {
    'index': len(blocks),
    'timestamp': ts,
    'transactions': [{
        'type': 'BTC_DEPOSIT',
        'sender': 'BTC_RESERVE',
        'recipient': wallet_name,
        'amount': road_minted,
        'btc_deposited': btc_amount,
        'conv_id': conv_id,
        'btc_source': '$BTC_SOURCE',
        'btc_address': '$BTC_DEPOSIT_ADDRESS',
        'btc_price_usd': btc_price,
        'timestamp': ts,
        'hash': tx_hash
    }],
    'previous_hash': prev_hash,
    'nonce': 0,
    'hash': hashlib.sha256(f'{len(blocks)}:{ts}:{tx_hash}:{prev_hash}'.encode()).hexdigest()
}
chain['chain'].append(block)
with open('$CHAIN_FILE', 'w') as f:
    json.dump(chain, f, indent=2)
# --- Output ---
print(f'\033[38;5;82m{"═" * 60}\033[0m')
print(f'\033[1m\033[38;5;205m BTC → ROAD CONVERSION COMPLETE\033[0m')
print(f'\033[38;5;82m{"═" * 60}\033[0m')
print(f' Conversion ID: {conv_id}')
print(f' BTC Deposited: {btc_amount:.8f} BTC')
print(f' ROAD Minted: {road_minted:.8f} ROAD (backed)')
print(f' USD Value: \${btc_amount * btc_price:,.2f}')
print(f' BTC Price: \${btc_price:,.2f}')
print(f' Source: {wallet.get("name", wallet_name)} wallet')
print(f'\033[38;5;82m{"─" * 60}\033[0m')
print(f' Wallet Balance:')
print(f' Total: {wallet["balance"]:.8f} ROAD')
print(f' Backed: {wallet["backed_balance"]:.8f} ROAD (BTC-backed)')
print(f' Genesis: {wallet["unbacked_balance"]:.8f} ROAD (genesis mint)')
print(f'\033[38;5;82m{"─" * 60}\033[0m')
print(f' Reserve Status:')
print(f' BTC Reserve: {reserve["current_btc_reserve"]:.8f} BTC')
print(f' Backed Supply: {reserve["current_backed_road_supply"]:.8f} ROAD')
print(f' Reserve Ratio: {"1:1 ✓" if abs(reserve["current_btc_reserve"] - reserve["current_backed_road_supply"]) < 0.00000001 else "MISMATCH ✗"}')
print(f'\033[38;5;82m{"═" * 60}\033[0m')
PYEOF
}
# ═══════════════════════════════════════════════════════════
# REDEEM — Burn ROAD, release BTC claim
# ═══════════════════════════════════════════════════════════
cmd_redeem() {
  # Burn BTC-backed ROAD from a wallet and release the matching BTC claim
  # from the reserve (1 ROAD = 1 BTC peg). Mutates, in order: reserve
  # ledger (adds a hashed proof-of-reserve entry), wallet file, conversion
  # log, and the local chain (appends one block).
  # Args:    $1 - ROAD amount to burn (required)
  #          $2 - wallet name (default: alexa)
  # Returns: 1 on usage error, missing wallet, or insufficient balance/reserve.
  local road_amount="${1:-}"
  local wallet_name="${2:-alexa}"
  if [ -z "$road_amount" ]; then
    echo -e "${RED}Usage: $0 redeem <road_amount> [wallet_name]${RESET}"
    echo -e "${GRAY}Example: $0 redeem 0.05 # Burn 0.05 ROAD, claim 0.05 BTC${RESET}"
    return 1
  fi
  local wallet_file="$WALLETS_DIR/${wallet_name}.json"
  if [ ! -f "$wallet_file" ]; then
    echo -e "${RED}Wallet not found: $wallet_name${RESET}"
    return 1
  fi
  local btc_price
  btc_price=$(get_btc_price)
  init_reserve
  init_conversions
  # Unquoted heredoc: the shell substitutes the amount, wallet name, price,
  # and ledger file paths directly into the Python source below before
  # python3 ever sees it.
  python3 << PYEOF
import json, hashlib, time, sys
road_amount = float('$road_amount')
wallet_name = '$wallet_name'
btc_price = float('$btc_price')
btc_released = road_amount # 1:1 peg
# Load wallet
with open('$WALLETS_DIR/${wallet_name}.json') as f:
    wallet = json.load(f)
# Only the BTC-backed portion of a balance is redeemable; genesis-minted
# ROAD has no reserve behind it.
backed = wallet.get('backed_balance', 0)
if road_amount > backed:
    print(f'\033[38;5;196mInsufficient backed balance. Have {backed:.8f} backed ROAD, need {road_amount:.8f}\033[0m')
    print(f'\033[38;5;240mNote: Only BTC-backed ROAD can be redeemed for BTC.\033[0m')
    sys.exit(1)
# Load reserve
with open('$RESERVE_FILE') as f:
    reserve = json.load(f)
if btc_released > reserve['current_btc_reserve']:
    print(f'\033[38;5;196mInsufficient BTC reserve. Have {reserve["current_btc_reserve"]:.8f} BTC, need {btc_released:.8f}\033[0m')
    sys.exit(1)
ts = time.time()
# Redemption id is derived from the request parameters plus timestamp.
conv_data = f"REDEEM:{road_amount}:{wallet_name}:{ts}"
conv_hash = hashlib.sha256(conv_data.encode()).hexdigest()[:16]
conv_id = f"RDMP-{conv_hash.upper()}"
# Update reserve
reserve['total_btc_redeemed'] += btc_released
reserve['total_road_burned'] += road_amount
reserve['current_btc_reserve'] -= btc_released
reserve['current_backed_road_supply'] -= road_amount
# Audit record; cmd_verify later recomputes this hash field from the
# id / road_burned / btc_released / timestamp fields, in that order.
proof = {
    'id': conv_id,
    'type': 'REDEMPTION',
    'road_burned': road_amount,
    'btc_released': btc_released,
    'wallet': wallet_name,
    'btc_price_usd': btc_price,
    'usd_value': btc_released * btc_price,
    'timestamp': ts,
    'datetime': time.strftime('%Y-%m-%dT%H:%M:%S', time.gmtime(ts)),
    'hash': hashlib.sha256(f"{conv_id}:{road_amount}:{btc_released}:{ts}".encode()).hexdigest()
}
reserve['proof_of_reserve'].append(proof)
with open('$RESERVE_FILE', 'w') as f:
    json.dump(reserve, f, indent=2)
# Update wallet
wallet['balance'] -= road_amount
wallet['backed_balance'] -= road_amount
wallet['last_redemption'] = {
    'conv_id': conv_id,
    'road_burned': road_amount,
    'btc_claimed': btc_released,
    'timestamp': ts
}
with open('$WALLETS_DIR/${wallet_name}.json', 'w') as f:
    json.dump(wallet, f, indent=2)
# Log conversion
with open('$CONVERSIONS_FILE') as f:
    convs = json.load(f)
convs['conversions'].append({
    'id': conv_id,
    'type': 'ROAD_TO_BTC',
    'road_in': road_amount,
    'btc_out': btc_released,
    'wallet': wallet_name,
    'btc_price_usd': btc_price,
    'usd_value': btc_released * btc_price,
    'timestamp': ts,
    'datetime': time.strftime('%Y-%m-%dT%H:%M:%S', time.gmtime(ts))
})
convs['total_redemptions'] += 1
with open('$CONVERSIONS_FILE', 'w') as f:
    json.dump(convs, f, indent=2)
# Append to chain
with open('$CHAIN_FILE') as f:
    chain = json.load(f)
blocks = chain['chain']
prev_hash = blocks[-1]['hash'] if blocks else '0' * 64
tx_data = f"ROAD_REDEEM:{conv_id}:{road_amount}:{btc_released}:{wallet_name}"
tx_hash = hashlib.sha256(tx_data.encode()).hexdigest()
block = {
    'index': len(blocks),
    'timestamp': ts,
    'transactions': [{
        'type': 'ROAD_REDEMPTION',
        'sender': wallet_name,
        'recipient': 'BTC_RESERVE',
        'amount': road_amount,
        'btc_released': btc_released,
        'conv_id': conv_id,
        'btc_destination': '$BTC_DEPOSIT_ADDRESS',
        'btc_price_usd': btc_price,
        'timestamp': ts,
        'hash': tx_hash
    }],
    'previous_hash': prev_hash,
    'nonce': 0,
    'hash': hashlib.sha256(f'{len(blocks)}:{ts}:{tx_hash}:{prev_hash}'.encode()).hexdigest()
}
chain['chain'].append(block)
with open('$CHAIN_FILE', 'w') as f:
    json.dump(chain, f, indent=2)
# Receipt output
print(f'\033[38;5;196m{"═" * 60}\033[0m')
print(f'\033[1m\033[38;5;214m ROAD → BTC REDEMPTION COMPLETE\033[0m')
print(f'\033[38;5;196m{"═" * 60}\033[0m')
print(f' Redemption ID: {conv_id}')
print(f' ROAD Burned: {road_amount:.8f} ROAD')
print(f' BTC Claimed: {btc_released:.8f} BTC')
print(f' USD Value: \${btc_released * btc_price:,.2f}')
print(f' Destination: $BTC_DEPOSIT_ADDRESS')
print(f'\033[38;5;196m{"─" * 60}\033[0m')
print(f' Wallet Balance:')
print(f' Total: {wallet["balance"]:.8f} ROAD')
print(f' Backed: {wallet["backed_balance"]:.8f} ROAD')
print(f' Genesis: {wallet.get("unbacked_balance", 0):.8f} ROAD')
print(f'\033[38;5;196m{"─" * 60}\033[0m')
print(f' Reserve:')
print(f' BTC Reserve: {reserve["current_btc_reserve"]:.8f} BTC')
print(f' Backed Supply: {reserve["current_backed_road_supply"]:.8f} ROAD')
print(f' Ratio: {"1:1 ✓" if abs(reserve["current_btc_reserve"] - reserve["current_backed_road_supply"]) < 0.00000001 else "MISMATCH ✗"}')
print(f'\033[38;5;196m{"═" * 60}\033[0m')
PYEOF
}
# ═══════════════════════════════════════════════════════════
# STATUS — Full reserve and conversion status
# ═══════════════════════════════════════════════════════════
cmd_status() {
  # Print the full reserve / supply / conversion / wallet report.
  # Read-only: loads the reserve ledger, conversion log, and every wallet
  # file; writes nothing. Values reach Python via argv (quoted heredoc
  # delimiter, so no shell interpolation into the Python source).
  init_reserve
  init_conversions
  local btc_price
  btc_price=$(get_btc_price)
  python3 - "$RESERVE_FILE" "$CONVERSIONS_FILE" "$WALLETS_DIR" "$btc_price" << 'PYEOF'
import json, os, sys
# argv: reserve ledger path, conversion log path, wallets dir, BTC/USD price.
reserve_file, conv_file, wallets_dir, btc_price_str = sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4]
with open(reserve_file) as f:
    r = json.load(f)
with open(conv_file) as f:
    c = json.load(f)
btc_price = float(btc_price_str)
# Sum balances across all wallet files; bitcoin-bridge.json is bridge
# metadata, not a wallet, and is skipped.
total_road = 0
wallets = []
for wf in sorted(os.listdir(wallets_dir)):
    if wf.endswith('.json') and wf != 'bitcoin-bridge.json':
        with open(f'{wallets_dir}/{wf}') as f:
            w = json.load(f)
            total_road += w.get('balance', 0)
            wallets.append(w)
# ANSI 256-color palette for the report ("v" is defined but unused below).
p = '\033[38;5;205m'
a = '\033[38;5;214m'
g = '\033[38;5;82m'
b = '\033[38;5;69m'
v = '\033[38;5;135m'
wh = '\033[38;5;255m'
d = '\033[38;5;240m'
bold = '\033[1m'
x = '\033[0m'
print(f'{g}{"═" * 60}{x}')
print(f'{bold}{p} ROADCHAIN RESERVE STATUS{x}')
print(f'{g}{"═" * 60}{x}')
print()
print(f' {bold}{a}BTC Reserve{x}')
print(f' {wh}Total Deposited: {r["total_btc_deposited"]:.8f} BTC{x}')
print(f' {wh}Total Redeemed: {r["total_btc_redeemed"]:.8f} BTC{x}')
print(f' {g}Current Reserve: {r["current_btc_reserve"]:.8f} BTC{x}')
print(f' {g}Reserve Value: ${r["current_btc_reserve"] * btc_price:,.2f} USD{x}')
print()
print(f' {bold}{a}ROAD Supply{x}')
print(f' {wh}Genesis Mint: {r["genesis_road_supply"]:.8f} ROAD (unbacked){x}')
print(f' {wh}BTC-Backed Minted: {r["total_road_minted_backed"]:.8f} ROAD{x}')
print(f' {wh}Burned/Redeemed: {r["total_road_burned"]:.8f} ROAD{x}')
print(f' {g}Backed Supply: {r["current_backed_road_supply"]:.8f} ROAD{x}')
print(f' {b}Total Supply: {total_road:.8f} ROAD{x}')
print()
print(f' {bold}{a}Reserve Health{x}')
# Healthy means reserve == backed supply to within one satoshi (1e-8).
ratio_ok = abs(r['current_btc_reserve'] - r['current_backed_road_supply']) < 0.00000001
print(f' {g if ratio_ok else chr(27)+"[38;5;196m"}Reserve Ratio: {"1:1 VERIFIED ✓" if ratio_ok else "MISMATCH ✗"}{x}')
print(f' {wh}BTC Price: ${btc_price:,.2f}{x}')
print(f' {wh}1 ROAD = 1 BTC = ${btc_price:,.2f}{x}')
print()
print(f' {bold}{a}Conversion Stats{x}')
print(f' {wh}Total Deposits: {c["total_deposits"]}{x}')
print(f' {wh}Total Redemptions: {c["total_redemptions"]}{x}')
print(f' {wh}Proof Entries: {len(r["proof_of_reserve"])}{x}')
print()
print(f' {bold}{a}Wallet Balances{x}')
# Non-empty wallets get the bright breakdown line; empty ones are dimmed.
for ww in wallets:
    name = ww.get('name', '?')
    bal = ww.get('balance', 0)
    backed = ww.get('backed_balance', 0)
    unbacked = ww.get('unbacked_balance', 0)
    if bal > 0:
        print(f' {wh} {name:12s} {bal:>14.8f} ROAD ({backed:.8f} backed / {unbacked:.8f} genesis){x}')
    else:
        print(f' {d} {name:12s} {bal:>14.8f} ROAD{x}')
print(f'{g}{"═" * 60}{x}')
PYEOF
}
# ═══════════════════════════════════════════════════════════
# VERIFY — Audit reserve integrity
# ═══════════════════════════════════════════════════════════
cmd_verify() {
  # Audit the reserve for internal consistency.
  # Checks: reserve arithmetic (deposited - redeemed), backed-supply
  # arithmetic (minted - burned), the 1:1 BTC:ROAD ratio, every
  # proof-of-reserve hash, wallet backed-balance totals, and chain linkage
  # (previous_hash pointers). Only write: the last_verified timestamp.
  init_reserve
  python3 - "$RESERVE_FILE" "$CHAIN_FILE" "$WALLETS_DIR" << 'PYEOF'
import json, hashlib, os, time, sys
# argv: reserve ledger path, chain file path, wallets directory.
RESERVE = sys.argv[1]
CHAIN = sys.argv[2]
WALLETS_DIR = sys.argv[3]
errors = []
warnings = []
# ANSI colors for the PASS/FAIL/WARN/INFO markers.
g = '\033[38;5;82m'
r = '\033[38;5;196m'
a = '\033[38;5;214m'
p = '\033[38;5;205m'
w = '\033[38;5;255m'
bold = '\033[1m'
x = '\033[0m'
print(f'{a}{"═" * 60}{x}')
print(f'{bold}{p} ROADCHAIN RESERVE AUDIT{x}')
print(f'{a}{"═" * 60}{x}')
# 1. Load and check reserve
with open(RESERVE) as f:
    reserve = json.load(f)
# Check math: deposited - redeemed = current reserve
# (all comparisons use a one-satoshi tolerance, 1e-8)
expected_btc = reserve['total_btc_deposited'] - reserve['total_btc_redeemed']
actual_btc = reserve['current_btc_reserve']
if abs(expected_btc - actual_btc) > 0.00000001:
    errors.append(f'BTC reserve mismatch: expected {expected_btc:.8f}, got {actual_btc:.8f}')
else:
    print(f' {g}[PASS]{x} BTC reserve math: {actual_btc:.8f} BTC')
# Check: minted - burned = current backed supply
expected_road = reserve['total_road_minted_backed'] - reserve['total_road_burned']
actual_road = reserve['current_backed_road_supply']
if abs(expected_road - actual_road) > 0.00000001:
    errors.append(f'ROAD supply mismatch: expected {expected_road:.8f}, got {actual_road:.8f}')
else:
    print(f' {g}[PASS]{x} Backed ROAD supply math: {actual_road:.8f} ROAD')
# Check: 1:1 ratio
if abs(actual_btc - actual_road) > 0.00000001:
    errors.append(f'Reserve ratio broken: {actual_btc:.8f} BTC != {actual_road:.8f} ROAD')
else:
    print(f' {g}[PASS]{x} 1:1 reserve ratio maintained')
# 2. Check proof chain integrity
# Recompute each proof hash. Deposit proofs carry btc_amount/road_minted;
# redemption proofs carry road_burned/btc_released. The .get() fallbacks
# select whichever pair exists so the reconstruction matches the field
# order used when the hash was originally written.
proofs = reserve.get('proof_of_reserve', [])
for i, proof in enumerate(proofs):
    expected_hash = hashlib.sha256(
        f"{proof['id']}:{proof.get('btc_amount', proof.get('road_burned', 0))}:{proof.get('road_minted', proof.get('btc_released', 0))}:{proof['timestamp']}".encode()
    ).hexdigest()
    if proof.get('hash') != expected_hash:
        errors.append(f'Proof #{i} ({proof["id"]}): hash mismatch')
    else:
        print(f' {g}[PASS]{x} Proof #{i} ({proof["id"]}): hash verified')
if not proofs:
    print(f' {a}[INFO]{x} No proofs yet (no conversions recorded)')
# 3. Check wallets sum
# Backed balances across wallets should equal the reserve's backed supply.
total_wallet_backed = 0
for wf in sorted(os.listdir(WALLETS_DIR)):
    if wf.endswith('.json') and wf != 'bitcoin-bridge.json':
        with open(f'{WALLETS_DIR}/{wf}') as f:
            ww = json.load(f)
        total_wallet_backed += ww.get('backed_balance', 0)
if abs(total_wallet_backed - actual_road) > 0.00000001:
    warnings.append(f'Wallet backed sum ({total_wallet_backed:.8f}) != reserve backed supply ({actual_road:.8f})')
else:
    print(f' {g}[PASS]{x} Wallet backed totals match reserve')
# 4. Chain integrity check
# Only verifies previous_hash linkage between consecutive blocks; it does
# not recompute each block's own hash.
with open(CHAIN) as f:
    chain = json.load(f)
blocks = chain['chain']
chain_ok = True
for i in range(1, len(blocks)):
    if blocks[i]['previous_hash'] != blocks[i-1]['hash']:
        errors.append(f'Chain break at block {i}: prev_hash mismatch')
        chain_ok = False
        break
if chain_ok:
    print(f' {g}[PASS]{x} Chain integrity: {len(blocks)} blocks verified')
# Summary
print(f'{a}{"─" * 60}{x}')
if errors:
    for e in errors:
        print(f' {r}[FAIL] {e}{x}')
if warnings:
    for ww in warnings:
        print(f' {a}[WARN] {ww}{x}')
if not errors and not warnings:
    print(f' {g}{bold}ALL CHECKS PASSED ✓{x}')
else:
    print(f' {r}{bold}{len(errors)} errors, {len(warnings)} warnings{x}')
# Update last verified
reserve['last_verified'] = time.strftime('%Y-%m-%dT%H:%M:%S', time.gmtime())
with open(RESERVE, 'w') as f:
    json.dump(reserve, f, indent=2)
print(f'{a}{"═" * 60}{x}')
PYEOF
}
# ═══════════════════════════════════════════════════════════
# WATCH — Check BTC deposit address via API
# ═══════════════════════════════════════════════════════════
cmd_watch() {
  # Check the on-chain balance of a BTC address via the explorer API.
  # Args:    $1 - BTC address (default: bridge deposit address)
  # Returns: 1 when the balance lookup fails.
  local address="${1:-$BTC_DEPOSIT_ADDRESS}"
  echo -e "${AMBER}Checking BTC address: ${WHITE}${address}${RESET}"
  echo ""
  local balance
  balance=$(check_btc_address_balance "$address")
  # check_btc_address_balance reports failure with the literal string ERROR.
  if [ "$balance" = "ERROR" ]; then
    echo -e "${RED}Could not fetch balance from API${RESET}"
    echo -e "${GRAY}Try: https://mempool.space/address/$address${RESET}"
    return 1
  fi
  local btc_price
  btc_price=$(get_btc_price)
  # Pass values as argv with a quoted heredoc instead of interpolating them
  # into the Python source (consistent with cmd_status/cmd_history/cmd_wallet
  # and immune to shell-injection via odd API output).
  python3 - "$balance" "$btc_price" << 'PYEOF'
import sys
btc = float(sys.argv[1])
price = float(sys.argv[2])
usd = btc * price
print(f'\033[38;5;82m On-chain Balance: {btc:.8f} BTC\033[0m')
print(f'\033[38;5;82m USD Value: ${usd:,.2f}\033[0m')
print(f'\033[38;5;240m Price: ${price:,.2f}/BTC\033[0m')
PYEOF
}
# ═══════════════════════════════════════════════════════════
# PRICE — Live price display
# ═══════════════════════════════════════════════════════════
cmd_price() {
  # Display the current ROAD/BTC/USD prices (1 ROAD pegged to 1 BTC).
  # Best-effort cache refresh first; failures are deliberately ignored so a
  # stale cached quote can still be shown.
  "$HOME/roadchain-price-feed.sh" fetch 2>/dev/null || true
  local quote
  quote=$(get_btc_price)
  # The same ruled separator is printed three times; build it once.
  local rule="${GREEN}═══════════════════════════════════════${RESET}"
  echo -e "$rule"
  echo -e "${BOLD}${PINK} ROAD/BTC Price${RESET}"
  echo -e "$rule"
  echo -e " ${WHITE}1 ROAD = 1 BTC${RESET}"
  echo -e " ${WHITE}1 ROAD = \$${quote} USD${RESET}"
  echo -e " ${WHITE}1 BTC = \$${quote} USD${RESET}"
  echo -e "$rule"
}
# ═══════════════════════════════════════════════════════════
# HISTORY — Show conversion history
# ═══════════════════════════════════════════════════════════
cmd_history() {
  # Render the full BTC<->ROAD conversion log with per-entry USD values.
  init_conversions
  python3 - "$CONVERSIONS_FILE" << 'PYEOF'
import json, sys

# Load the ledger maintained by the deposit/redeem commands.
with open(sys.argv[1]) as fh:
    ledger = json.load(fh)
entries = ledger['conversions']

# ANSI 256-color palette shared with the rest of the toolchain.
GREEN = '\033[38;5;82m'
RED = '\033[38;5;196m'
AMBER = '\033[38;5;214m'
PINK = '\033[38;5;205m'
WHITE = '\033[38;5;255m'
DIM = '\033[38;5;240m'
BOLD = '\033[1m'
RESET = '\033[0m'

print(f'{AMBER}{"═" * 70}{RESET}')
print(f'{BOLD}{PINK} CONVERSION HISTORY{RESET}')
print(f'{AMBER}{"═" * 70}{RESET}')
if not entries:
    print(f' {DIM}No conversions yet. Use: roadchain-convert.sh deposit <btc>{RESET}')
else:
    # One line per conversion: timestamp, direction, amounts, USD, id.
    for entry in entries:
        if entry['type'] == 'BTC_TO_ROAD':
            arrow = f'{GREEN}BTC \u2192 ROAD{RESET}'
            detail = f'{entry["btc_in"]:.8f} BTC \u2192 {entry["road_out"]:.8f} ROAD'
        else:
            arrow = f'{RED}ROAD \u2192 BTC{RESET}'
            detail = f'{entry["road_in"]:.8f} ROAD \u2192 {entry["btc_out"]:.8f} BTC'
        print(f' {WHITE}{entry["datetime"]}{RESET} {arrow} {WHITE}{detail}{RESET} {DIM}${entry["usd_value"]:,.2f}{RESET} {DIM}[{entry["id"]}]{RESET}')
print(f'{AMBER}{"─" * 70}{RESET}')
print(f' Deposits: {ledger["total_deposits"]} | Redemptions: {ledger["total_redemptions"]}')
print(f'{AMBER}{"═" * 70}{RESET}')
PYEOF
}
# ═══════════════════════════════════════════════════════════
# WALLET — Show wallet with backed/unbacked breakdown
# ═══════════════════════════════════════════════════════════
cmd_wallet() {
  # Show a wallet's balance with backed (BTC-redeemable) vs genesis
  # (unbacked) breakdown, plus last deposit/redemption summaries.
  # Args:    $1 - wallet name (default: alexa)
  # Returns: 1 if the wallet file does not exist.
  local wallet_name="${1:-alexa}"
  local wallet_file="$WALLETS_DIR/${wallet_name}.json"
  if [ ! -f "$wallet_file" ]; then
    echo -e "${RED}Wallet not found: $wallet_name${RESET}"
    # Enumerate wallets with a glob instead of parsing ls output (robust
    # against odd filenames); bitcoin-bridge.json is bridge metadata, not
    # a wallet, so it is excluded.
    local available="" f base
    for f in "$WALLETS_DIR"/*.json; do
      [ -e "$f" ] || continue
      base=$(basename "$f" .json)
      [ "$base" = "bitcoin-bridge" ] && continue
      available+="$base "
    done
    echo -e "${GRAY}Available: ${available}${RESET}"
    return 1
  fi
  local btc_price
  btc_price=$(get_btc_price)
  # Quoted heredoc + argv: nothing is shell-interpolated into the Python.
  python3 - "$wallet_file" "$btc_price" << 'PYEOF'
import json, sys
# argv: wallet JSON path, current BTC/USD price.
with open(sys.argv[1]) as f:
    w = json.load(f)
btc_price = float(sys.argv[2])
bal = w.get('balance', 0)
backed = w.get('backed_balance', 0)
# Wallets created before the backed/unbacked split default the genesis
# portion to the full balance.
unbacked = w.get('unbacked_balance', bal)
# ANSI 256-color palette for the card.
g = '\033[38;5;82m'
a = '\033[38;5;214m'
p = '\033[38;5;205m'
b = '\033[38;5;69m'
wh = '\033[38;5;255m'
d = '\033[38;5;240m'
bold = '\033[1m'
x = '\033[0m'
print(f'{g}{"═" * 50}{x}')
print(f'{bold}{p} WALLET: {w.get("name", "unknown")}{x}')
print(f'{g}{"═" * 50}{x}')
print(f' {wh}Address: {w.get("address", "N/A")}{x}')
print(f' {g}Balance: {bal:.8f} ROAD{x}')
print(f' {g} = {bal:.8f} BTC{x}')
print(f' {g} = ${bal * btc_price:,.2f} USD{x}')
print(f'{g}{"─" * 50}{x}')
print(f' {a}Backed: {backed:.8f} ROAD (BTC-backed, redeemable){x}')
print(f' {b}Genesis: {unbacked:.8f} ROAD (genesis mint){x}')
print(f'{g}{"─" * 50}{x}')
# Optional activity summaries written by cmd_deposit / cmd_redeem.
ld = w.get('last_deposit')
if ld:
    print(f' {d}Last deposit: {ld["btc"]:.8f} BTC [{ld["conv_id"]}]{x}')
lr = w.get('last_redemption')
if lr:
    print(f' {d}Last redeem: {lr["road_burned"]:.8f} ROAD [{lr["conv_id"]}]{x}')
print(f'{g}{"═" * 50}{x}')
PYEOF
}
# ═══════════════════════════════════════════════════════════
# MAIN
# ═══════════════════════════════════════════════════════════
# Subcommand dispatch; with no argument the help text is shown.
subcommand="${1:-help}"
case "$subcommand" in
  deposit)
    cmd_deposit "${2:-}" "${3:-alexa}"
    ;;
  redeem)
    cmd_redeem "${2:-}" "${3:-alexa}"
    ;;
  status)
    cmd_status
    ;;
  verify)
    cmd_verify
    ;;
  watch)
    cmd_watch "${2:-$BTC_DEPOSIT_ADDRESS}"
    ;;
  price)
    cmd_price
    ;;
  history)
    cmd_history
    ;;
  wallet)
    cmd_wallet "${2:-alexa}"
    ;;
  help | --help | -h)
    echo -e "${BOLD}${PINK}RoadChain BTC ↔ ROAD Converter${RESET}"
    echo ""
    echo -e " ${GREEN}deposit${RESET} <btc> [wallet] Record BTC deposit, mint backed ROAD"
    echo -e " ${GREEN}redeem${RESET} <road> [wallet] Burn ROAD, claim BTC"
    echo -e " ${GREEN}status${RESET} Full reserve & supply report"
    echo -e " ${GREEN}verify${RESET} Audit reserve integrity"
    echo -e " ${GREEN}watch${RESET} [btc_address] Check BTC address balance (API)"
    echo -e " ${GREEN}price${RESET} Live BTC/ROAD price"
    echo -e " ${GREEN}history${RESET} Conversion history"
    echo -e " ${GREEN}wallet${RESET} [name] Wallet balance breakdown"
    echo ""
    echo -e " ${GRAY}1 ROAD = 1 BTC (1:1 peg, BTC-backed)${RESET}"
    echo -e " ${GRAY}Owner: ALEXALOUISEAMUNDSON.COM${RESET}"
    ;;
  *)
    echo -e "${RED}Unknown command: $1${RESET}"
    echo "Run: $0 help"
    exit 1
    ;;
esac