Some checks failed
Lint & Format / detect (push) Failing after 32s
Monorepo Lint / lint-shell (push) Failing after 31s
Monorepo Lint / lint-js (push) Failing after 30s
Lint & Format / js-lint (push) Has been skipped
Lint & Format / py-lint (push) Has been skipped
Lint & Format / sh-lint (push) Has been skipped
Lint & Format / go-lint (push) Has been skipped
RoadChain-SHA2048: 692327ce2e990f37 RoadChain-Identity: alexa@sovereign RoadChain-Full: 692327ce2e990f37649b83e948241ac858c0d07146c6b42043e4770d638c44d5bada5639ad82c7aa8911d7042912c1d75b6bbce9a453637621b3903dc912a3a9537696cedf7a0870e3bf962ca44677793082aaae5c5433615885ad20fab1e80417202d11e93284483551ba9558f06809d2f3fa53c00a657277d7c183abe3ba187c1af6856a455071771757cca67ff2b74c5f855f23dd8cc8f5b3596c966b2344361fcbb74843e9d9d9ad66c5321ef64ce787f9d255d11e0d4e0ee571af4e09697964e22f6f629a11279b315c9a4563860b169ad93fa500b485297516ef2ba2039f76348c0d547cfa182e9b0bccee73f5e8b7db7e33d61e8199bb4464c2c30d03
143 lines
5.2 KiB
Python
143 lines
5.2 KiB
Python
"""
|
|
BlackRoad Agent Orchestrator — Agent Worker
|
|
The actual agent coroutine that processes tasks via Ollama inference.
|
|
Each agent is an async coroutine, NOT a process.
|
|
"""
|
|
import asyncio
|
|
import time
|
|
import logging
|
|
import json
|
|
import aiohttp
|
|
|
|
from .config import NODES
|
|
from .nats_protocol import TaskMessage, ResultMessage
|
|
|
|
# Module-level logger; child of the "orchestrator" logger hierarchy so the
# host application controls handlers/levels centrally.
log = logging.getLogger("orchestrator.worker")
|
|
|
|
|
|
class OllamaClient:
    """Async Ollama API client for model inference.

    Lifecycle: construct, then ``await init()`` once before the first
    request, and ``await close()`` when finished. Concurrent ``generate``
    calls per client are capped by an internal semaphore set in ``init()``.
    """

    def __init__(self, host: str, port: int = 11434):
        # Base URL of the Ollama HTTP API, e.g. "http://gpu-node:11434".
        self.base_url = f"http://{host}:{port}"
        # Both stay None until init() is awaited.
        self._session: aiohttp.ClientSession | None = None
        self._semaphore: asyncio.Semaphore | None = None

    async def init(self, max_concurrent: int = 4):
        """Create the HTTP session and cap concurrent generations."""
        self._session = aiohttp.ClientSession()
        self._semaphore = asyncio.Semaphore(max_concurrent)

    async def close(self):
        """Close the HTTP session. Safe to call when never initialized."""
        if self._session:
            await self._session.close()
            # Reset so a closed client is indistinguishable from a fresh
            # one and init() can be awaited again.
            self._session = None

    async def generate(self, model: str, prompt: str, timeout: int = 180) -> str:
        """Run inference through Ollama. Respects concurrency semaphore.

        Returns the model's response text, or "" on any failure (non-200
        status, timeout, transport error).

        Raises:
            RuntimeError: if ``init()`` was never awaited.
        """
        if self._session is None or self._semaphore is None:
            # Fail loudly instead of the cryptic AttributeError/TypeError
            # that using an uninitialized client would otherwise produce.
            raise RuntimeError("OllamaClient.init() must be awaited before generate()")
        async with self._semaphore:
            try:
                async with self._session.post(
                    f"{self.base_url}/api/generate",
                    json={
                        "model": model,
                        "prompt": prompt,
                        # Non-streaming: one JSON body with the full response.
                        "stream": False,
                        "options": {"num_predict": 512, "temperature": 0.7},
                    },
                    timeout=aiohttp.ClientTimeout(total=timeout),
                ) as resp:
                    if resp.status == 200:
                        data = await resp.json()
                        return data.get("response", "")
                    # Non-200: log a truncated body and degrade to "".
                    error = await resp.text()
                    log.error("Ollama error %d: %s", resp.status, error[:200])
                    return ""
            except asyncio.TimeoutError:
                log.error("Ollama timeout for model %s", model)
                return ""
            except Exception as e:
                log.error("Ollama request failed: %s", e)
                return ""

    async def health(self) -> bool:
        """Return True if the Ollama /api/tags endpoint answers with 200."""
        if self._session is None:
            # An uninitialized client is simply unhealthy, not an error.
            return False
        try:
            async with self._session.get(
                f"{self.base_url}/api/tags",
                timeout=aiohttp.ClientTimeout(total=5),
            ) as resp:
                return resp.status == 200
        except Exception:
            return False
|
|
|
|
|
|
# System prompts per archetype.
#
# Maps an agent archetype name to the system-prompt text injected at the top
# of every inference request. run_agent_task() falls back to the "worker"
# entry for unknown archetypes, so that key must always be present.
ARCHETYPE_PROMPTS = {
    "worker": "You are a BlackRoad worker agent. Execute tasks efficiently and return structured results. Be concise.",
    "researcher": "You are a BlackRoad research agent. Analyze information, find patterns, synthesize knowledge. Cite sources.",
    "coder": "You are a BlackRoad coding agent. Write clean, tested code. Follow best practices. Return code blocks.",
    "monitor": "You are a BlackRoad monitoring agent. Check system health, detect anomalies, report status. Be precise.",
    "creative": "You are a BlackRoad creative agent. Generate ideas, designs, narratives. Be original and bold.",
    "security": "You are a BlackRoad security agent. Audit code, check vulnerabilities, enforce policies. Be thorough.",
    "analyst": "You are a BlackRoad analyst agent. Process data, generate insights, build reports. Use numbers.",
    "coordinator": "You are a BlackRoad coordinator agent. Plan workflows, assign tasks, resolve conflicts. Be organized.",
}
|
|
|
|
|
|
async def run_agent_task(
    agent_id: str,
    agent_name: str,
    archetype: str,
    model: str,
    node: str,
    task: TaskMessage,
    ollama: OllamaClient,
) -> ResultMessage:
    """Execute a single task as an agent. This is the core work loop."""
    t0 = time.time()

    # Unknown archetypes degrade to the generic "worker" persona.
    persona = ARCHETYPE_PROMPTS.get(archetype, ARCHETYPE_PROMPTS["worker"])
    full_prompt = (
        f"[SYSTEM] {persona}\n"
        f"[AGENT] {agent_name} ({agent_id}) | Archetype: {archetype} | Node: {node}\n"
        f"[TASK] ID: {task.task_id} | Intent: {task.intent} | Priority: {task.priority}\n"
        f"[PROMPT] {task.prompt}"
    )

    log.info(
        "Agent %s (%s/%s) executing task %s on %s",
        agent_id, archetype, model, task.task_id, node,
    )

    try:
        output = await ollama.generate(model, full_prompt)
        elapsed_ms = int((time.time() - t0) * 1000)

        # Empty string is how OllamaClient.generate signals failure.
        if not output:
            return ResultMessage(
                task_id=task.task_id,
                agent_id=agent_id,
                node=node,
                status="failed",
                error="Empty response from Ollama",
                latency_ms=elapsed_ms,
            )

        return ResultMessage(
            task_id=task.task_id,
            agent_id=agent_id,
            node=node,
            status="completed",
            result=output,
            latency_ms=elapsed_ms,
        )

    except Exception as exc:
        # Any unexpected error becomes a "failed" result rather than
        # crashing the agent coroutine.
        elapsed_ms = int((time.time() - t0) * 1000)
        log.error("Agent %s task %s failed: %s", agent_id, task.task_id, exc)
        return ResultMessage(
            task_id=task.task_id,
            agent_id=agent_id,
            node=node,
            status="failed",
            error=str(exc),
            latency_ms=elapsed_ms,
        )
|