#!/bin/bash
# Wave 13: Performance Optimization & Query Caching
#
# Deploys an in-memory caching proxy ("perf-cache") to host `octavia`.
# The proxy fronts several localhost backend services and answers repeated
# GET queries from a TTL-bounded cache to reduce backend load.

echo "⚡ Wave 13: Performance Optimization"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo ""
echo "🎯 Deploying to octavia..."
echo ""

ssh octavia 'bash -s' << 'REMOTE_SCRIPT'
echo "📊 Creating performance cache service..."
mkdir -p ~/perf-cache

cat > ~/perf-cache/app.py << 'PYTHON_EOF'
#!/usr/bin/env python3
"""Performance Cache Service - Reduce backend load with smart caching"""
import http.server
import socketserver
import json
import urllib.request
import urllib.parse
import urllib.error
import hashlib
import time
import os
from datetime import datetime

PORT = 6000

# In-memory cache: key -> {'data': str, 'timestamp': float, 'ttl': number}
cache = {}
cache_hits = 0
cache_misses = 0
cache_size_limit = 1000  # Max entries before oldest-entry eviction

# Backend services this proxy caches responses for
BACKENDS = {
    'tts': 'http://localhost:5001',
    'monitor': 'http://localhost:5002',
    'metrics': 'http://localhost:5400',
    'analytics': 'http://localhost:5500'
}

def get_cache_key(url, params):
    """Generate a deterministic cache key from URL and params.

    MD5 is used here only as a fast, non-cryptographic fingerprint;
    sort_keys makes the key independent of param insertion order.
    """
    key_str = f"{url}:{json.dumps(params, sort_keys=True)}"
    return hashlib.md5(key_str.encode()).hexdigest()

def get_cached(key):
    """Return cached data if present and still within its TTL, else None.

    Updates the global hit/miss counters as a side effect.
    """
    global cache_hits, cache_misses
    if key in cache:
        entry = cache[key]
        # Check TTL (time-to-live)
        if time.time() - entry['timestamp'] < entry['ttl']:
            cache_hits += 1
            return entry['data']
    cache_misses += 1
    return None

def set_cache(key, data, ttl=60):
    """Store data under key with a TTL, evicting the oldest entry when full."""
    if len(cache) >= cache_size_limit:
        # Evict the entry with the oldest insertion timestamp
        oldest_key = min(cache.keys(), key=lambda k: cache[k]['timestamp'])
        del cache[oldest_key]
    cache[key] = {
        'data': data,
        'timestamp': time.time(),
        'ttl': ttl
    }

def fetch_from_backend(service, endpoint, params):
    """Fetch endpoint from the named backend; return body text or error JSON.

    Best-effort: any failure (unknown service, timeout, HTTP error) is
    surfaced to the caller as a JSON error payload rather than raised.
    """
    try:
        url = f"{BACKENDS[service]}{endpoint}"
        if params:
            # urlencode escapes keys/values; a naive '&'.join of f"{k}={v}"
            # would produce malformed URLs for values with reserved chars
            url += '?' + urllib.parse.urlencode(params)
        with urllib.request.urlopen(url, timeout=5) as response:
            return response.read().decode()
    except Exception as e:
        return json.dumps({'error': str(e)})

class CacheHandler(http.server.SimpleHTTPRequestHandler):
    def do_GET(self):
        """Route GET requests: dashboard, health, stats, clear, cached proxy."""
        if self.path == '/':
            self.send_dashboard()
        elif self.path == '/api/health':
            self.send_json({'status': 'healthy', 'service': 'perf-cache'})
        elif self.path == '/api/stats':
            total = cache_hits + cache_misses
            hit_rate = (cache_hits / total * 100) if total > 0 else 0
            self.send_json({
                'cache_hits': cache_hits,
                'cache_misses': cache_misses,
                'hit_rate': f"{hit_rate:.1f}%",
                'cache_size': len(cache),
                'cache_limit': cache_size_limit
            })
        elif self.path == '/api/cache/clear':
            # Must be checked BEFORE the '/api/cache/' prefix branch below,
            # or that branch swallows this path and reports an invalid path.
            entries_removed = len(cache)  # capture count before clearing
            cache.clear()
            self.send_json({'message': 'Cache cleared', 'entries_removed': entries_removed})
        elif self.path.startswith('/api/cache/'):
            # Parse cache request: /api/cache/{service}/{endpoint}
            parts = self.path.split('/')
            if len(parts) >= 5:
                service = parts[3]
                endpoint = '/' + '/'.join(parts[4:])
                # Parse query params; split on first '=' only so values
                # containing '=' do not break the dict construction
                params = {}
                if '?' in endpoint:
                    endpoint, query = endpoint.split('?', 1)
                    params = dict(p.split('=', 1) for p in query.split('&') if '=' in p)
                # Generate cache key
                cache_key = get_cache_key(f"{service}{endpoint}", params)
                # Try cache first
                cached_data = get_cached(cache_key)
                if cached_data:
                    self.send_json(json.loads(cached_data), cached=True)
                else:
                    # Fetch from backend
                    data = fetch_from_backend(service, endpoint, params)
                    # Cache with different TTLs based on endpoint
                    ttl = 30 if 'health' in endpoint else 60
                    set_cache(cache_key, data, ttl)
                    self.send_json(json.loads(data), cached=False)
            else:
                self.send_json({'error': 'Invalid cache path'})
        else:
            self.send_error(404)

    def send_json(self, data, cached=None):
        """Send a 200 JSON response; X-Cache header reports HIT/MISS when known."""
        self.send_response(200)
        self.send_header('Content-type', 'application/json')
        if cached is not None:
            self.send_header('X-Cache', 'HIT' if cached else 'MISS')
        self.end_headers()
        self.wfile.write(json.dumps(data).encode())

    def send_dashboard(self):
        """Send HTML dashboard"""
        total = cache_hits + cache_misses
        hit_rate = (cache_hits / total * 100) if total > 0 else 0
        html = f"""