Files
lucidia-core/orchestrator.py
Alexa Louise 6afdb4b148 Initial extraction from blackroad-prism-console
Lucidia Core - AI reasoning engines for specialized domains:
- Physicist (867 lines) - energy modeling, force calculations
- Mathematician (760 lines) - symbolic computation, proofs
- Geologist (654 lines) - terrain modeling, stratigraphy
- Engineer (599 lines) - structural analysis, optimization
- Painter (583 lines) - visual generation, graphics
- Chemist (569 lines) - molecular analysis, reactions
- Analyst (505 lines) - pattern recognition, insights
- Plus: architect, researcher, mediator, speaker, poet, navigator

Features:
- FastAPI wrapper with REST endpoints for each agent
- CLI with `lucidia list`, `lucidia run`, `lucidia api`
- Codex YAML configurations for agent personalities
- Quantum engine extensions

12,512 lines of Python across 91 files.

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
2025-11-30 08:00:53 -06:00

65 lines
2.2 KiB
Python

"""Concurrency-safe orchestrator for Lucidia shard jobs."""
from __future__ import annotations
import concurrent.futures
import logging
import time
from typing import Any, Callable, Dict, Tuple
logger = logging.getLogger(__name__)
def run_shards(
    job_fn: Callable[[int], Any], *, num_shards: int = 10, timebox_seconds: float = 60
) -> Tuple[Dict[int, Any], Dict[int, str]]:
    """Execute shard jobs in parallel with a global timebox.

    Parameters
    ----------
    job_fn:
        Callable accepting a ``shard_id`` and returning a result.
    num_shards:
        Total number of shards to execute.  Values ``<= 0`` produce an
        empty run (no pool is created).
    timebox_seconds:
        Maximum wall clock time allowed for all shards.  May be fractional.

    Returns
    -------
    Tuple[Dict[int, Any], Dict[int, str]]
        A tuple ``(results, errors)`` where ``results`` maps shard IDs to
        successful return values and ``errors`` maps shard IDs to error
        messages for failed or cancelled jobs.

    Notes
    -----
    Python threads cannot be killed: a shard already *running* when the
    timebox expires is recorded as ``"cancelled"`` but its thread still
    runs to completion while the executor shuts down, so total wall time
    may exceed ``timebox_seconds``.
    """
    # Ψ′:orchestrate — manage parallel shard execution
    results: Dict[int, Any] = {}
    errors: Dict[int, str] = {}
    # Guard: ThreadPoolExecutor raises ValueError for max_workers <= 0,
    # so an empty run must short-circuit before creating the pool.
    if num_shards <= 0:
        return results, errors
    deadline = time.monotonic() + timebox_seconds
    with concurrent.futures.ThreadPoolExecutor(max_workers=num_shards) as executor:
        future_map = {
            executor.submit(job_fn, shard): shard for shard in range(num_shards)
        }
        while future_map:
            # Recompute the remaining budget each pass; bail out explicitly
            # rather than handing ``wait`` a negative timeout when the
            # deadline slipped past between iterations.
            remaining = deadline - time.monotonic()
            if remaining <= 0:
                break
            done, _ = concurrent.futures.wait(
                future_map,
                timeout=remaining,
                return_when=concurrent.futures.FIRST_COMPLETED,
            )
            if not done:
                # ``wait`` timed out with nothing finished: timebox is up.
                break
            for fut in done:
                shard_id = future_map.pop(fut)
                try:
                    results[shard_id] = fut.result()
                except Exception as exc:  # pragma: no cover - logging
                    logger.exception("Shard %s failed", shard_id)
                    errors[shard_id] = str(exc)
        # Anything still pending missed the timebox: cancel (best effort —
        # futures already running cannot be stopped) and record the miss.
        for fut, shard_id in future_map.items():
            fut.cancel()
            errors[shard_id] = "cancelled"
        if future_map:
            logger.warning("Timebox exceeded; cancelled %d shards", len(future_map))
    return results, errors