Merge pull request #10 from blackboxprogramming/copilot/redirect-requests-to-ollama

Route all AI chat to local Ollama; add @copilot/@lucidia/@blackboxprogramming/@ollama mention routing
This commit is contained in:
Alexa Amundson
2026-03-09 03:26:40 -05:00
committed by GitHub
4 changed files with 96 additions and 15 deletions

View File

@@ -14,6 +14,7 @@ import time
from datetime import datetime, timedelta from datetime import datetime, timedelta
import os import os
import asyncio import asyncio
import httpx
# Configuration # Configuration
SECRET_KEY = os.getenv("SECRET_KEY", "blackroad-secret-key-change-in-production") SECRET_KEY = os.getenv("SECRET_KEY", "blackroad-secret-key-change-in-production")
@@ -21,6 +22,10 @@ STRIPE_SECRET_KEY = os.getenv("STRIPE_SECRET_KEY", "sk_test_...")
JWT_ALGORITHM = "HS256" JWT_ALGORITHM = "HS256"
JWT_EXPIRATION_HOURS = 24 JWT_EXPIRATION_HOURS = 24
# Ollama configuration — all AI requests go to the local Ollama instance
OLLAMA_BASE_URL = os.getenv("OLLAMA_BASE_URL", "http://localhost:11434")
OLLAMA_MODEL = os.getenv("OLLAMA_MODEL", "llama3")
# Initialize FastAPI # Initialize FastAPI
app = FastAPI( app = FastAPI(
title="BlackRoad OS API", title="BlackRoad OS API",
@@ -165,6 +170,28 @@ async def get_current_user_info(user_id: Optional[str] = Depends(get_current_use
raise HTTPException(status_code=404, detail="User not found") raise HTTPException(status_code=404, detail="User not found")
# Ollama helper — sends chat history to local Ollama and returns the reply
async def _ollama_chat(messages: list) -> str:
    """Send a chat history to the local Ollama instance and return the reply.

    Args:
        messages: Chat history as ``[{"role": ..., "content": ...}, ...]``,
            the shape Ollama's ``/api/chat`` endpoint expects.

    Returns:
        The assistant's reply text (stripped), or a human-readable warning
        string when Ollama is unreachable, times out, or fails. Never raises —
        callers surface the return value directly as the chat response.
    """
    try:
        # Local model generations can be slow; allow up to two minutes.
        async with httpx.AsyncClient(timeout=120.0) as client:
            resp = await client.post(
                f"{OLLAMA_BASE_URL}/api/chat",
                json={"model": OLLAMA_MODEL, "messages": messages, "stream": False},
            )
            resp.raise_for_status()
            data = resp.json()
        return data.get("message", {}).get("content", "").strip()
    except httpx.ConnectError:
        return (
            "⚠️ Ollama is not reachable at "
            f"{OLLAMA_BASE_URL}. "
            "Please make sure Ollama is running on your local machine."
        )
    except httpx.TimeoutException:
        # Distinguish slow/loading models from hard failures so users get
        # actionable feedback instead of the generic error below.
        return (
            "⚠️ Ollama timed out while generating a response. "
            "The model may still be loading — please try again."
        )
    except Exception as exc:  # noqa: BLE001 — surface any failure as chat text
        return f"⚠️ Ollama error: {exc}"
# AI Chat endpoints # AI Chat endpoints
@app.post("/api/ai-chat/chat") @app.post("/api/ai-chat/chat")
async def chat(message: ChatMessage, user_id: Optional[str] = Depends(get_current_user)): async def chat(message: ChatMessage, user_id: Optional[str] = Depends(get_current_user)):
@@ -186,8 +213,14 @@ async def chat(message: ChatMessage, user_id: Optional[str] = Depends(get_curren
} }
conversations_db[conversation_id]["messages"].append(user_msg) conversations_db[conversation_id]["messages"].append(user_msg)
# Generate AI response (mock - replace with real LLM) # Build conversation history for Ollama (exclude timestamp field)
ai_response = f"I'm BlackRoad OS. You said: '{message.message}'. I have 30,000 agents ready to help!" history = [
{"role": m["role"], "content": m["content"]}
for m in conversations_db[conversation_id]["messages"]
]
# Route to Ollama — local hardware, no external providers
ai_response = await _ollama_chat(history)
ai_msg = { ai_msg = {
"role": "assistant", "role": "assistant",
@@ -199,7 +232,8 @@ async def chat(message: ChatMessage, user_id: Optional[str] = Depends(get_curren
return { return {
"conversation_id": conversation_id, "conversation_id": conversation_id,
"message": ai_response, "message": ai_response,
"messages": conversations_db[conversation_id]["messages"] "messages": conversations_db[conversation_id]["messages"],
"provider": "ollama",
} }
@app.get("/api/ai-chat/conversations") @app.get("/api/ai-chat/conversations")

View File

@@ -97,6 +97,8 @@ class BlackRoadAPI {
} }
// AI Chat: Send message // AI Chat: Send message
// Messages containing @copilot, @lucidia, @blackboxprogramming, or @ollama
// are routed directly to the local Ollama instance, bypassing external providers.
async chat(message, conversationId = null) { async chat(message, conversationId = null) {
return this.request('/api/ai-chat/chat', { return this.request('/api/ai-chat/chat', {
method: 'POST', method: 'POST',
@@ -104,6 +106,22 @@ class BlackRoadAPI {
}); });
} }
// Direct Ollama chat (bypasses the backend entirely, calls Ollama from the browser)
async ollamaChat(message, model = 'llama3', history = []) {
const ollamaBase = 'http://localhost:11434';
const messages = [...history, { role: 'user', content: message }];
const response = await fetch(`${ollamaBase}/api/chat`, {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({ model, messages, stream: false })
});
if (!response.ok) {
throw new Error(`Ollama responded with ${response.status}`);
}
const data = await response.json();
return data.message?.content || '';
}
// AI Chat: List conversations // AI Chat: List conversations
async listConversations() { async listConversations() {
return this.request('/api/ai-chat/conversations'); return this.request('/api/ai-chat/conversations');

View File

@@ -115,12 +115,12 @@
<div class="container"> <div class="container">
<div class="chat-messages" id="chatMessages"> <div class="chat-messages" id="chatMessages">
<div class="message assistant"> <div class="message assistant">
<strong>BlackRoad AI</strong> <strong>BlackRoad AI (powered by Ollama)</strong>
<p>Hello! I'm powered by 30,000 AI agents. Ask me anything!</p> <p>Hello! I'm running on your local Ollama instance. Use <strong>@ollama</strong>, <strong>@copilot</strong>, <strong>@lucidia</strong>, or <strong>@blackboxprogramming</strong> to talk directly to your local hardware — no external providers.</p>
</div> </div>
</div> </div>
<div class="input-area"> <div class="input-area">
<input type="text" id="messageInput" placeholder="Type your message..." /> <input type="text" id="messageInput" placeholder="Type your message… use @ollama, @copilot, @lucidia, or @blackboxprogramming to talk to your local Ollama" />
<button onclick="sendMessage()" id="sendBtn">Send</button> <button onclick="sendMessage()" id="sendBtn">Send</button>
</div> </div>
</div> </div>
@@ -132,6 +132,10 @@
const messageInput = document.getElementById('messageInput'); const messageInput = document.getElementById('messageInput');
const sendBtn = document.getElementById('sendBtn'); const sendBtn = document.getElementById('sendBtn');
let conversationId = null; let conversationId = null;
let ollamaHistory = [];
// @ mentions that route to local Ollama
const OLLAMA_MENTION = /@(copilot|lucidia|blackboxprogramming|ollama)\b/i;
// Check auth status // Check auth status
async function checkAuth() { async function checkAuth() {
@@ -157,17 +161,27 @@
sendBtn.disabled = true; sendBtn.disabled = true;
// Show loading // Show loading
const loadingId = addMessage('assistant', 'BlackRoad AI', '<div class="loading">Thinking...</div>'); const loadingId = addMessage('assistant', 'BlackRoad AI', '<div class="loading">Thinking</div>');
const usesOllama = OLLAMA_MENTION.test(message);
try { try {
const data = await window.blackroad.chat(message, conversationId); let responseText;
conversationId = data.conversation_id; if (usesOllama) {
// Route directly to local Ollama — no external provider
// Remove loading message responseText = await window.blackroad.ollamaChat(message, 'llama3', ollamaHistory);
document.getElementById(loadingId).remove(); ollamaHistory.push({ role: 'user', content: message });
ollamaHistory.push({ role: 'assistant', content: responseText });
// Add AI response document.getElementById(loadingId).remove();
addMessage('assistant', 'BlackRoad AI', data.message || 'I received your message!'); addMessage('assistant', '🦙 Ollama (local)', responseText);
} else {
// Route through backend (which also calls Ollama)
const data = await window.blackroad.chat(message, conversationId);
conversationId = data.conversation_id;
document.getElementById(loadingId).remove();
const label = data.provider === 'ollama' ? '🦙 Ollama (local)' : 'BlackRoad AI';
addMessage('assistant', label, data.message || 'I received your message!');
}
} catch (error) { } catch (error) {
document.getElementById(loadingId).remove(); document.getElementById(loadingId).remove();
addMessage('assistant', 'BlackRoad AI', 'Sorry, I encountered an error. Please try again.'); addMessage('assistant', 'BlackRoad AI', 'Sorry, I encountered an error. Please try again.');

View File

@@ -3,6 +3,15 @@
version: "3.9" version: "3.9"
services: services:
# Local Ollama LLM server — target of OLLAMA_BASE_URL in lucidia-api.
ollama:
  image: ollama/ollama:latest
  container_name: blackroad-ollama
  ports:
    - "11434:11434"  # default Ollama API port, exposed to the host
  volumes:
    - ollama_data:/root/.ollama  # persist downloaded models across restarts
  restart: unless-stopped
lucidia-api: lucidia-api:
build: . build: .
container_name: lucidia-api container_name: lucidia-api
@@ -11,8 +20,11 @@ services:
- .:/app - .:/app
ports: ports:
- "8000:8000" - "8000:8000"
environment:
- OLLAMA_BASE_URL=http://ollama:11434
depends_on: depends_on:
- nginx - nginx
- ollama
nginx: nginx:
image: nginx:latest image: nginx:latest
@@ -22,3 +34,6 @@ services:
ports: ports:
- "80:80" - "80:80"
volumes:
ollama_data: