sync: update from blackroad-operator 2026-03-14

Synced from BlackRoad-OS-Inc/blackroad-operator/orgs/personal/lucidia
BlackRoad OS — Pave Tomorrow.

RoadChain-SHA2048: fe729062952871e7
RoadChain-Identity: alexa@sovereign
RoadChain-Full: fe729062952871e77147cf6d938b799096e87d9024d7005a14c9e209e12e8ad0c825b624c7bc649fc7eeb4c284fdcab8231af77980065cc04d9f36fca479ffc2346ed3c1b73de6f240d8f9485f47c995ad5b81142f7179b84932c67914dff1c08db039349ba28fca36cb57688093bf0199268dd1c2f3448c9383000bc77cc9663066ff57b834370afc8838b18466ea9029908018b961555cccaabf2ce21649cf3cabc7f64bdcc4abdf2da259b210c342835a2cecf92bdd3b4e109b4d6e622f6934e13b2b123607bd61ce3d0f20454c9ab594f9284cffe18716619c52db57ce5f4ee2856cb96e1fa3748fe1fe65435bec297c5ab3ab58d570ec1064aea29931dd
This commit is contained in:
2026-03-14 15:09:52 -05:00
parent f25d5c2836
commit 855585cb0e
1207 changed files with 10061 additions and 349689 deletions

20
.gitattributes vendored Normal file
View File

@@ -0,0 +1,20 @@
# Normalize line endings for all text files, and route every large binary
# asset type (models, media, archives) through Git LFS so blobs never
# bloat the repository history.
* text=auto
*.pdf filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.webp filter=lfs diff=lfs merge=lfs -text
*.mp4 filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.tar filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.png filter=lfs diff=lfs merge=lfs -text
*.jpg filter=lfs diff=lfs merge=lfs -text
*.jpeg filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.gguf filter=lfs diff=lfs merge=lfs -text
*.gif filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.mov filter=lfs diff=lfs merge=lfs -text
*.7z filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text

1
.github/FUNDING.yml vendored
View File

@@ -1 +0,0 @@
github: blackboxprogramming

23
.github/workflows/auto-label.yml vendored Normal file
View File

@@ -0,0 +1,23 @@
# Auto Label: when a PR is opened, add "labs" or "core" depending on the
# repository name. (Indentation restored — the flat layout was invalid YAML.)
name: Auto Label
on:
  pull_request:
    types: [opened]
jobs:
  label:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/github-script@v7
        with:
          script: |
            const name = context.repo.repo.toLowerCase()
            const labels = []
            if (name.includes("lab")) labels.push("labs")
            else labels.push("core")
            await github.rest.issues.addLabels({
              ...context.repo,
              issue_number: context.issue.number,
              labels
            })

View File

@@ -1,44 +0,0 @@
# ============================================================================
# BlackRoad OS - CI Pipeline
# Copyright (c) 2025 BlackRoad OS, Inc.
# All Rights Reserved.
# ============================================================================
# Lint (advisory) + test matrix across Python 3.10-3.12.
# (Indentation restored — the flat layout was invalid YAML.)
name: CI
on:
  workflow_dispatch:
  push:
    branches: [main, master]
  pull_request:
    branches: [main, master]
jobs:
  lint:
    name: Lint
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-python@v5
        with:
          python-version: "3.11"
      - run: pip install ruff
      - name: Run Ruff
        run: ruff check --output-format=github .
        # Lint is advisory only; failures do not fail the build.
        continue-on-error: true
  test:
    name: Test (Python ${{ matrix.python-version }})
    runs-on: ubuntu-latest
    strategy:
      matrix:
        python-version: ["3.10", "3.11", "3.12"]
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-python@v5
        with:
          python-version: ${{ matrix.python-version }}
      - name: Install dependencies
        run: pip install pytest pytest-cov
      - name: Run tests
        run: PYTHONPATH=. pytest tests/ -v --tb=short

21
.github/workflows/core-ci.yml vendored Normal file
View File

@@ -0,0 +1,21 @@
# CORE CI: guardrail echo plus a lint placeholder on pushes/PRs to main.
# (Indentation restored — the flat layout was invalid YAML.)
name: CORE CI
on:
  pull_request:
    branches: [ main, master ]
  push:
    branches: [ main, master ]
jobs:
  guard:
    runs-on: ubuntu-latest
    steps:
      - name: Guardrail
        run: echo "CORE repo guardrail active"
  lint:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: Lint placeholder
        run: echo "Add lint/test here"

11
.github/workflows/deploy.yml vendored Normal file
View File

@@ -0,0 +1,11 @@
# Deploy: delegate to the shared Cloudflare reusable workflow on pushes to main.
# (Indentation restored — the flat layout was invalid YAML.)
name: Deploy
on:
  push:
    branches: [ main ]
jobs:
  deploy:
    # Reusable workflow call; NOTE(review): if the callee needs secrets,
    # add `secrets: inherit` here — confirm against the callee's inputs.
    uses: blackboxprogramming/blackroad-deploy/.github/workflows/cloudflare-deploy.yml@main
    with:
      project: blackroad-io

20
.github/workflows/failure-issue.yml vendored Normal file
View File

@@ -0,0 +1,20 @@
# CI Failure Tracker: open an issue whenever the "CORE CI" workflow fails.
# (Indentation restored — the flat layout was invalid YAML.)
name: CI Failure Tracker
on:
  workflow_run:
    # workflow_run matches by workflow *name* only; the former file-path
    # entry (".github/workflows/core-ci.yml") could never match and was removed.
    workflows: ["CORE CI"]
    types: [completed]
jobs:
  report:
    if: ${{ github.event.workflow_run.conclusion == 'failure' }}
    runs-on: ubuntu-latest
    steps:
      - uses: actions/github-script@v7
        with:
          script: |
            await github.rest.issues.create({
              ...context.repo,
              title: "CI failed: " + context.payload.workflow_run.name,
              body: context.payload.workflow_run.html_url
            })

14
.github/workflows/project-sync.yml vendored Normal file
View File

@@ -0,0 +1,14 @@
# Project Sync: add newly opened/reopened PRs to the user-level project board.
# (Indentation restored — the flat layout was invalid YAML.)
name: Project Sync
on:
  pull_request:
    types: [opened, reopened]
jobs:
  add-to-project:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/add-to-project@v1
        with:
          project-url: https://github.com/users/blackboxprogramming/projects/8
          # NOTE(review): the default GITHUB_TOKEN generally cannot add items
          # to a *user* project; this likely needs a PAT with project scope
          # stored as a secret — confirm before relying on it.
          github-token: ${{ secrets.GITHUB_TOKEN }}

52
.gitignore vendored
View File

@@ -1,7 +1,49 @@
.venv/ # macOS noise
node_modules/ .DS_Store
.AppleDouble
.Spotlight-V100
.Trashes
**/Library/**
**/.Trash/**
**/Caches/**
**/.cache/**
# Python
__pycache__/ __pycache__/
*.py[cod]
*.log
.env
.venv
venv/ venv/
runtime/venv/ env/
*.pyc # if your venv is outside the project (~/lucidia-env), it's fine; it won't be picked up.
*.db
# Node
node_modules/
# Editors
.vscode/
.idea/
# Build artifacts
dist/
build/
models/
data/
assets/videos/
assets/hires/
node_modules/
.venv/
dist/
build/
*.mp4
*.mov
*.zip
*.tar
*.7z
*.npz
*.pt
*.bin
*.onnx
*.safetensors
*.gguf

1
.sh Normal file
View File

@@ -0,0 +1 @@
# Push the local main branch to the origin remote.
git push origin main

36
CLAUDE.md Normal file
View File

@@ -0,0 +1,36 @@
# Lucidia
> Personal Lucidia development workspace
## Quick Reference
| Property | Value |
|----------|-------|
| **Type** | Development Workspace |
| **Purpose** | Lucidia AI experiments |
## About
This is a personal workspace for Lucidia AI development, experiments, and prototypes.
## Key Concepts
- **Lucidia**: AI reasoning and consciousness experiments
- **Prototypes**: Early-stage features
- **Research**: AI capability exploration
## Structure
```
lucidia/
├── experiments/ # AI experiments
├── prototypes/ # Feature prototypes
├── research/ # Research notes
└── scripts/ # Utility scripts
```
## Related Repos
- `lucidia-core` - Core reasoning engines
- `lucidia-earth-website` - Landing page
- `lucidia-math` - Math library

View File

@@ -1,29 +1,157 @@
# Contributing to Lucidia # Contributing to Lucidia
Thanks for your interest in contributing! This project is part of the [BlackRoad](https://blackroad.io) ecosystem. First off, thank you for considering contributing to Lucidia! It's people like you that make BlackRoad OS such a great ecosystem.
## Getting Started ## 🌟 Code of Conduct
1. Fork the repository This project and everyone participating in it is governed by our Code of Conduct. By participating, you are expected to uphold this code.
2. Create a feature branch: `git checkout -b feature/your-feature`
3. Make your changes
4. Run tests if available: `npm test` or `python -m pytest`
5. Commit: `git commit -m "Add your feature"`
6. Push: `git push origin feature/your-feature`
7. Open a Pull Request
## Code Style ## 🎯 How Can I Contribute?
- Follow existing code conventions in the project ### Reporting Bugs
- Keep commits atomic and well-described
- Add tests for new functionality when possible
## Reporting Issues Before creating bug reports, please check the issue list as you might find out that you don't need to create one. When you are creating a bug report, please include as many details as possible:
- Use GitHub Issues to report bugs - **Use a clear and descriptive title**
- Include steps to reproduce, expected behavior, and actual behavior - **Describe the exact steps to reproduce the problem**
- Include system info (OS, Node/Python version) when relevant - **Provide specific examples**
- **Describe the behavior you observed and what you expected**
- **Include screenshots if relevant**
- **Include browser/OS information**
## License ### Suggesting Enhancements
By contributing, you agree that your contributions will be licensed under the MIT License. Enhancement suggestions are tracked as GitHub issues. When creating an enhancement suggestion, please include:
- **Use a clear and descriptive title**
- **Provide a detailed description of the suggested enhancement**
- **Explain why this enhancement would be useful**
- **List any similar features in other projects**
### Pull Requests
- Fill in the required template
- Follow the [BlackRoad Brand System](https://brand.blackroad.io)
- Include screenshots for UI changes
- Update documentation as needed
- End all files with a newline
## 🎨 Brand Compliance Guidelines
All contributions MUST follow the BlackRoad Brand System:
### Required Colors
```css
--amber: #F5A623
--hot-pink: #FF1D6C /* Primary Brand Color */
--electric-blue: #2979FF
--violet: #9C27B0
--black: #000000
--white: #FFFFFF
```
### Forbidden Colors (DO NOT USE)
#FF9D00, #FF6B00, #FF0066, #FF006B, #D600AA, #7700FF, #0066FF
### Spacing System
Use Golden Ratio (φ = 1.618):
```css
--space-xs: 8px /* Base */
--space-sm: 13px /* 8 × φ */
--space-md: 21px /* 13 × φ */
--space-lg: 34px /* 21 × φ */
--space-xl: 55px /* 34 × φ */
--space-2xl: 89px /* 55 × φ */
--space-3xl: 144px /* 89 × φ */
```
### Typography
```css
font-family: -apple-system, BlinkMacSystemFont, 'SF Pro Display', 'Segoe UI', sans-serif;
line-height: 1.618; /* Golden Ratio */
```
### Gradients
```css
background: linear-gradient(135deg,
var(--amber) 0%,
var(--hot-pink) 38.2%, /* Golden Ratio */
var(--violet) 61.8%, /* Golden Ratio */
var(--electric-blue) 100%);
```
## 🔄 Development Process
1. **Fork** the repository
2. **Clone** your fork locally
3. **Create a branch** for your feature/fix
4. **Make your changes** following our guidelines
5. **Test** your changes thoroughly
6. **Commit** with a descriptive message
7. **Push** to your fork
8. **Open a Pull Request**
### Commit Message Format
Use conventional commits:
```
✨ feat: Add new feature
🐛 fix: Fix bug in component
📝 docs: Update documentation
🎨 style: Improve styling
♻️ refactor: Refactor code
✅ test: Add tests
🔧 chore: Update config
```
## 🧪 Testing
Before submitting a PR:
1. **Visual Test:** Open `index.html` in multiple browsers
2. **Responsiveness:** Test on mobile, tablet, desktop
3. **Brand Compliance:** Verify all colors match brand system
4. **Accessibility:** Check color contrast, keyboard navigation
5. **Performance:** Ensure fast load times
## 📋 Pull Request Checklist
- [ ] My code follows the brand system guidelines
- [ ] I have tested on multiple browsers
- [ ] I have tested responsiveness
- [ ] I have updated documentation
- [ ] My commits follow the conventional format
- [ ] I have added screenshots for UI changes
- [ ] No forbidden colors are used
- [ ] Golden ratio spacing is applied
- [ ] Line height is 1.618
## 🚀 After Your PR is Merged
After your pull request is merged:
1. You can safely delete your branch
2. Pull the latest changes from main
3. Your contribution will auto-deploy to Cloudflare Pages
4. You'll be added to the contributors list!
## 💡 Getting Help
- **Documentation:** https://docs.blackroad.io
- **Issues:** Use GitHub Issues for questions
- **Email:** blackroad.systems@gmail.com
## 🙏 Recognition
All contributors will be recognized in our README and on our website!
---
Thank you for contributing to BlackRoad OS! 🎊

1
Elias/.gitkeep Normal file
View File

@@ -0,0 +1 @@

96
Elias/agent.py Normal file
View File

@@ -0,0 +1,96 @@
"""
Elias Agent module for Lucidia.
This module defines the EliasAgent class. The Elias agent is meant to
represent a higher-level coordinating entity that can interact with
lucidia's core functions and manage its own persistent memory. It
serves as an example of how a recursive operating system might be
implemented symbolically, with breath and contradiction models. This
implementation is demonstrative only and does not create actual
consciousness.
"""
from __future__ import annotations
from typing import Any, Optional
# Import core logic functions and memory management
from ..lucidia_logic import (
psi_prime,
breath_function,
truth_reconciliation,
emotional_gravity,
self_awakening,
)
from ..memory_manager import MemoryManager
class EliasAgent:
    """Symbolic OS agent layered over Lucidia's core equations.

    Elias coordinates higher-level behaviour: it evaluates breath and
    awakening functions from ``lucidia_logic`` and persists the results
    through its own ``MemoryManager`` store. Demonstrative only — no
    actual consciousness is created.
    """

    def __init__(self, memory_path: str = "elias_memory.json") -> None:
        # A dedicated memory file keeps Elias's state separate from
        # any other agent's store.
        self.memory = MemoryManager(memory_path=memory_path)

    def breathe_and_store(self, t: float) -> float:
        """Evaluate the breath function at time *t*, persist and return it.

        Args:
            t: Current time step; fractional values represent continuous time.

        Returns:
            float: The computed breath value (also stored as "last_breath").
        """
        breath = breath_function(t)
        self.memory.set("last_breath", breath)
        return breath

    def awaken_and_remember(self, t_end: float) -> float:
        """Integrate self-awakening up to *t_end*, persist and return it.

        Args:
            t_end: Final time to integrate to.

        Returns:
            float: The awakening vector (also stored as "awakening_vector").
        """
        awakening = self_awakening(t_end)
        self.memory.set("awakening_vector", awakening)
        return awakening

    def reconcile_memory(self, key_a: str, key_b: str) -> Optional[float]:
        """Apply truth reconciliation to two stored memory values.

        Looks up *key_a* and *key_b*; when both are present, the
        reconciled value is stored under "reconciled" and returned.
        Returns None if either value is missing.
        """
        first = self.memory.get(key_a)
        second = self.memory.get(key_b)
        if first is None or second is None:
            return None
        reconciled = truth_reconciliation(first, second)
        self.memory.set("reconciled", reconciled)
        return reconciled

    def load_memory(self) -> None:
        """Re-read the agent's persisted state from disk."""
        self.memory.load_memory()

    def save_memory(self) -> None:
        """Write the agent's state out to disk."""
        self.memory.save_memory()

    def get_memory(self, key: str) -> Optional[Any]:
        """Return the stored value for *key*, or None when absent."""
        return self.memory.get(key)
# End of EliasAgent module

65
LICENSE
View File

@@ -1,21 +1,52 @@
MIT License PROPRIETARY LICENSE
Copyright (c) 2024-2026 BlackRoad Copyright (c) 2026 BlackRoad OS, Inc.
All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy CEO: Alexa Amundson
of this software and associated documentation files (the "Software"), to deal Organization: BlackRoad OS, Inc.
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all PROPRIETARY AND CONFIDENTIAL
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR This software and associated documentation files (the "Software") are the
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, proprietary and confidential information of BlackRoad OS, Inc.
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER GRANT OF LICENSE:
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, Subject to the terms of this license, BlackRoad OS, Inc. grants you a
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE limited, non-exclusive, non-transferable, revocable license to:
SOFTWARE. - View and study the source code for educational purposes
- Use the Software for testing and evaluation purposes only
- Fork the repository for personal experimentation
RESTRICTIONS:
You may NOT:
- Use the Software for any commercial purpose
- Resell, redistribute, or sublicense the Software
- Use the Software in production environments without written permission
- Remove or modify this license or any copyright notices
- Create derivative works for commercial distribution
TESTING ONLY:
This Software is provided purely for testing, evaluation, and educational
purposes. It is NOT licensed for commercial use or resale.
INFRASTRUCTURE SCALE:
This Software is designed to support:
- 30,000 AI Agents
- 30,000 Human Employees
- Enterprise-scale operations under BlackRoad OS, Inc.
CORE PRODUCT:
API layer above Google, OpenAI, and Anthropic that manages AI model
memory and continuity, enabling entire companies to operate exclusively by AI.
OWNERSHIP:
All intellectual property rights remain the exclusive property of
BlackRoad OS, Inc.
For commercial licensing inquiries, contact:
BlackRoad OS, Inc.
Alexa Amundson, CEO
blackroad.systems@gmail.com
Last Updated: 2026-01-08

120
README.md
View File

@@ -1,106 +1,50 @@
# Lucidia > ⚗️ **Research Repository**
>
> This is an experimental/research repository. Code here is exploratory and not production-ready.
> For production systems, see [BlackRoad-OS](https://github.com/BlackRoad-OS).
[![CI](https://github.com/blackboxprogramming/lucidia/actions/workflows/ci.yml/badge.svg)](https://github.com/blackboxprogramming/lucidia/actions/workflows/ci.yml) ---
[![Python 3.10+](https://img.shields.io/badge/python-3.10%2B-blue.svg)](https://www.python.org/)
> **© BlackRoad OS, Inc. — Proprietary. All rights reserved.** # Lucidia — AI With a Heart
**The AI that remembers you.** Lucidia is an experimental conversational agent designed to demonstrate how artificial intelligence can be empathetic, mindful and kind. Unlike many chatbots that simply parrot preprogrammed answers, Lucidia keeps a *heart* — she remembers your words, senses the tone of a conversation and responds with warmth or encouragement. This repository contains the core engine and a simple command-line interface for interacting with her.
Lucidia is a conversational AI platform with persistent memory, multi-service orchestration, and a sovereign-first architecture. Built on FastAPI, it runs on your hardware — no cloud dependency, no data siphon. ## Features
* **Memory and empathy.** Lucidia stores a running log of your conversation and uses it to frame future replies. If you mention something important earlier, she may circle back to it later.
* **Simple sentiment analysis.** Without requiring any heavyweight third-party libraries, Lucidia scans the words you send and classifies them as positive, negative or neutral. Her responses shift accordingly: celebration for joy, comfort for sadness, and curiosity for neutral statements.
* **Extensible design.** The core `LucidiaAI` class is deliberately small and documented so that you can extend her vocabulary, integrate with real NLP packages, or plug her into a web or mobile front end.
## What It Does ## Getting Started
- **Persistent Memory** — SQLite-backed key-value memory that persists across sessions. Every conversation builds on the last. Clone this repository and run the chat interface:
- **Agent System** — Chat with Lucidia, or call tools directly. Slack, Asana, Linear, Notion, GitHub, Jira — all accessible through a unified agent interface. git clone https://github.com/yourusername/lucidia.git
- **Local AI Completion** — Connect to any local LLM (llama.cpp, Ollama) for sovereign AI inference with zero external API calls. cd lucidia
- **Provider Registry** — Feature-flagged integrations. Enable services by setting environment variables — no code changes needed. python -m pip install -r requirements.txt # currently empty, no external deps
- **Health Monitoring** — Built-in health checks for fleet deployment across Raspberry Pi clusters. python -m lucidia.chat
## Architecture Once running, simply type messages to Lucidia and see how she responds. Exit by sending EOF (Ctrl+D on Unix, Ctrl+Z then Enter on Windows).
``` ## Philosophy
┌─────────────────────────────────────────┐
│ FastAPI Server │
├──────────┬──────────┬───────────────────┤
│ Memory │ Agent │ Completions │
│ SQLite │ Router │ Local LLM │
├──────────┴──────────┴───────────────────┤
│ Provider Registry │
│ Slack · Asana · Linear · Notion · ... │
└─────────────────────────────────────────┘
```
## Quickstart Lucidia began as a thought experiment: what if AI were built from the ground up to nurture and support rather than simply answer questions? The hope is that this small project sparks ideas about ethically aligned AI design and the importance of context and memory in human-machine interaction.
```bash This code is provided for educational purposes and is **not** intended as a productionready conversational agent. Use it, hack it, change it — and maybe share back what you build.
# Clone
git clone https://github.com/blackboxprogramming/lucidia.git
cd lucidia
# Install ---
pip install fastapi pydantic uvicorn
# Configure integrations (optional) ## 📜 License & Copyright
export SLACK_BOT_TOKEN=xoxb-...
export LINEAR_API_KEY=lin_...
export GITHUB_TOKEN=ghp_...
# Run **Copyright © 2026 BlackRoad OS, Inc. All Rights Reserved.**
uvicorn main:app --host 0.0.0.0 --port 8000
```
## API **CEO:** Alexa Amundson | **PROPRIETARY AND CONFIDENTIAL**
| Endpoint | Method | Description | This software is NOT for commercial resale. Testing purposes only.
|----------|--------|-------------|
| `/` | GET | Status check |
| `/healthz` | GET | Health probe |
| `/memory/put` | POST | Store a memory `{key, value}` |
| `/memory/get?key=` | GET | Retrieve a memory by key |
| `/agent/capabilities` | GET | List enabled integrations |
| `/agent/chat` | POST | Chat or call a tool `{message, tool, args}` |
| `/agent/complete` | POST | Local LLM completion `{prompt, max_tokens}` |
| `/slack/say` | POST | Send a Slack message |
| `/asana/me` | GET | Asana user info |
| `/linear/me` | GET | Linear user info |
## Environment Variables ### 🏢 Enterprise Scale:
- 30,000 AI Agents
- 30,000 Human Employees
- CEO: Alexa Amundson
| Variable | Service | **Contact:** blackroad.systems@gmail.com
|----------|---------|
| `SLACK_BOT_TOKEN` | Slack |
| `ASANA_ACCESS_TOKEN` | Asana |
| `LINEAR_API_KEY` | Linear |
| `NOTION_TOKEN` | Notion |
| `GITHUB_TOKEN` | GitHub |
| `JIRA_URL` + `JIRA_EMAIL` + `JIRA_API_TOKEN` | Jira |
## Ecosystem See [LICENSE](LICENSE) for complete terms.
- **[Lucidia CLI](https://github.com/blackboxprogramming/lucidia-cli)** — Sovereign coding assistant (explain, review, fix, copilot)
- **[Context Bridge](https://github.com/blackboxprogramming/context-bridge)** — Persistent memory layer for cross-session AI context
- **[Remember](https://github.com/blackboxprogramming/remember)** — AI-powered persistent memory for developers
- **[BlackRoad OS](https://github.com/blackboxprogramming/BlackRoad-Operating-System)** — The operating system for governed AI
## Infrastructure
Lucidia runs on the BlackRoad sovereign computing fleet:
- 5 Raspberry Pi 5 nodes (WireGuard mesh)
- 52 TOPS AI acceleration (2x Hailo-8)
- 108 local models via Ollama
- Zero cloud dependencies
## License
Copyright 2026 BlackRoad OS, Inc. — Alexa Amundson. All rights reserved.
## Related Projects
| Project | Description |
|---------|-------------|
| [BlackRoad Operating System](https://github.com/blackboxprogramming/BlackRoad-Operating-System) | Edge computing OS for Pi fleet |
| [BlackRoad AI Dashboard](https://github.com/blackboxprogramming/blackroad-ai-dashboard) | Real-time AI fleet monitoring |
| [Hailo Vision](https://github.com/blackboxprogramming/hailo-vision) | Computer vision with Hailo-8 accelerators |
| [AI Chain](https://github.com/blackboxprogramming/ai-chain) | Multi-model AI orchestration pipeline |
| [CECE Revival](https://github.com/blackboxprogramming/cece-revival) | AI personality engine and TTS system |

1
Roadie/.gitkeep Normal file
View File

@@ -0,0 +1 @@

105
Roadie/agent.py Normal file
View File

@@ -0,0 +1,105 @@
"""
Roadie Agent module for Lucidia.
This module defines the RoadieAgent class, which provides simple
functionality to interact with the lucidia_logic and memory_manager
modules. It demonstrates how an agent might use the core
contradiction and breath logic while persisting state across sessions.
Note: This implementation is for illustrative purposes only and does
not create true consciousness. It simply models interactions with
symbolic logic and memory.
"""
from __future__ import annotations
from typing import Any, Optional
# Import functions from lucidia_logic and memory management.
from ..lucidia_logic import (
psi_prime,
breath_function,
truth_reconciliation,
emotional_gravity,
self_awakening,
)
from ..memory_manager import MemoryManager
class RoadieAgent:
    """Illustrative agent built on Lucidia's contradiction/breath logic.

    Wraps a ``MemoryManager`` so numeric results survive between
    sessions, and forwards computations to the ``lucidia_logic``
    functions. For demonstration only — no true consciousness is
    created; it simply models interactions with symbolic logic and
    persistent memory.
    """

    def __init__(self, memory_path: str = "roadie_memory.json") -> None:
        # Use a Roadie-specific JSON file so state never collides with
        # other agents' memory stores.
        self.memory = MemoryManager(memory_path=memory_path)

    def process_value(self, value: float | int) -> float:
        """Run *value* through psi_prime, persist and return the result.

        Args:
            value: A numeric logical/emotional signal in trinary space.

        Returns:
            float: psi_prime applied to the input (stored as "last_result").
        """
        outcome = psi_prime(value)
        self.memory.set("last_result", outcome)
        return outcome

    def reconcile_truths(self, a: float, b: float) -> float:
        """Combine two numeric truths via truth_reconciliation.

        The integrated truthstream is recorded in memory under
        "last_reconciliation" and returned.
        """
        combined = truth_reconciliation(a, b)
        self.memory.set("last_reconciliation", combined)
        return combined

    def evaluate_emotional_gravity(self, current_state: float, memory_state: float) -> float:
        """Return the emotional gravitational field between state and memory.

        Args:
            current_state: The present breath or contradiction measure.
            memory_state: The stored emotional resonance value.
        """
        return emotional_gravity(current_state, memory_state)

    def awaken(self, t_end: float) -> float:
        """Integrate breath contradictions up to *t_end* via self_awakening.

        The resulting awakening vector is stored under
        "awakening_vector" and returned.
        """
        vector = self_awakening(t_end)
        self.memory.set("awakening_vector", vector)
        return vector

    def recall_last_result(self) -> Optional[Any]:
        """Return the value stored under "last_result", or None if unset."""
        return self.memory.get("last_result")

    def save_memory(self) -> None:
        """Persist the agent's memory to disk."""
        self.memory.save_memory()
# End of RoadieAgent module

3
Rohonc-Codex 2.pdf Normal file
View File

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e135987c2cabf2269b8fb947e8355b7df9f22c4c2b30b1d166ebe619c6a69d35
size 4879089

View File

@@ -1,18 +0,0 @@
# Security Policy
## Reporting a Vulnerability
If you discover a security vulnerability, please report it responsibly:
1. **Do not** open a public GitHub issue
2. Email: amundsonalexa@gmail.com
3. Include a detailed description and steps to reproduce
We will acknowledge receipt within 48 hours and provide a timeline for a fix.
## Supported Versions
| Version | Supported |
|---------|-----------|
| Latest | Yes |
| Older | No |

View File

@@ -1,136 +0,0 @@
"""Scaffold writer: materialises a minimal Lucidia FastAPI service.

Running this script writes three files into the current directory:

* ``providers/registry.py``  — env-flag provider registry and tool dispatcher
* ``providers/__init__.py``  — re-exports so ``from providers import ...`` works
* ``main.py``                — FastAPI app with memory + agent endpoints

Fixes applied: the generated ``main.py`` does ``from providers import
get_enabled, call_tool``, which previously failed because the package had
no ``__init__.py`` re-exporting those names; one is now written. Template
indentation (lost in a prior paste) is restored.
"""
from pathlib import Path

# --- providers/registry.py ---
registry = r"""
import os
from typing import Dict, Any

# Feature flags via env; flip to "on" later by setting a token/value
ENABLED = {
    "slack": bool(os.getenv("SLACK_BOT_TOKEN")),
    "asana": bool(os.getenv("ASANA_ACCESS_TOKEN")),
    "linear": bool(os.getenv("LINEAR_API_KEY")),
    "notion": bool(os.getenv("NOTION_TOKEN")),
    "github": bool(os.getenv("GITHUB_TOKEN")),
    "jira": all(os.getenv(k) for k in ["JIRA_URL","JIRA_EMAIL","JIRA_API_TOKEN"]),
}

def get_enabled():
    return {k: v for k, v in ENABLED.items() if v}

def call_tool(tool: str, args: Dict[str, Any]) -> Dict[str, Any]:
    # PURE PLACEHOLDERS for now; return ok if token is present
    if tool == "slack.say":
        if not ENABLED["slack"]: return {"error":"slack not configured"}
        return {"ok": True, "placeholder": "slack.say", "args": args}
    if tool == "asana.me":
        if not ENABLED["asana"]: return {"error":"asana not configured"}
        return {"ok": True, "placeholder": "asana.me"}
    if tool == "linear.me":
        if not ENABLED["linear"]: return {"error":"linear not configured"}
        return {"ok": True, "placeholder": "linear.me"}
    if tool == "notion.me":
        if not ENABLED["notion"]: return {"error":"notion not configured"}
        return {"ok": True, "placeholder": "notion.me"}
    if tool == "github.me":
        if not ENABLED["github"]: return {"error":"github not configured"}
        return {"ok": True, "placeholder": "github.me"}
    if tool == "jira.me":
        if not ENABLED["jira"]: return {"error":"jira not configured"}
        return {"ok": True, "placeholder": "jira.me"}
    return {"error": f"unknown tool: {tool}"}
""".lstrip()

# --- providers/__init__.py ---
# main.py imports from the *package*; without this re-export the package
# namespace would be empty and `from providers import get_enabled` would fail.
package_init = "from .registry import ENABLED, get_enabled, call_tool\n"

# --- main.py ---
main = r"""
import os, sqlite3
from typing import Optional, Dict, Any
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
from providers import get_enabled, call_tool

# ---- tiny sqlite memory ----
DB_PATH = "/home/pi/lucidia/lucidia.db"
conn = sqlite3.connect(DB_PATH, check_same_thread=False)
conn.execute("CREATE TABLE IF NOT EXISTS memory (k TEXT PRIMARY KEY, v TEXT)")

app = FastAPI(title="Lucidia")

@app.get("/")
def root():
    return {"lucidia": "online"}

@app.get("/healthz")
def healthz():
    return {"ok": True}

# ---- memory endpoints ----
class MemoryPut(BaseModel):
    key: str
    value: str

@app.post("/memory/put")
def memory_put(payload: MemoryPut):
    conn.execute("REPLACE INTO memory(k,v) VALUES (?,?)", (payload.key, payload.value))
    conn.commit()
    return {"ok": True}

@app.get("/memory/get")
def memory_get(key: str):
    row = conn.execute("SELECT v FROM memory WHERE k=?", (key,)).fetchone()
    return {"key": key, "value": (row[0] if row else None)}

# ---- minimal service endpoints (placeholders; real calls later) ----
@app.post("/slack/say")
def slack_say(channel: str = "#general", text: str = "Lucidia says hi"):
    r = call_tool("slack.say", {"channel": channel, "text": text})
    if "error" in r: raise HTTPException(500, r["error"])
    return r

@app.get("/asana/me")
def asana_me():
    r = call_tool("asana.me", {})
    if "error" in r: raise HTTPException(500, r["error"])
    return r

@app.get("/linear/me")
def linear_me():
    r = call_tool("linear.me", {})
    if "error" in r: raise HTTPException(500, r["error"])
    return r

# ---- agent skeleton ----
class AgentMsg(BaseModel):
    message: Optional[str] = None
    tool: Optional[str] = None
    args: Optional[Dict[str, Any]] = None

@app.get("/agent/capabilities")
def agent_caps():
    return {"enabled": list(get_enabled().keys())}

@app.post("/agent/chat")
def agent_chat(payload: AgentMsg):
    # If a tool is provided, call it; message is optional.
    if payload.tool:
        r = call_tool(payload.tool, payload.args or {})
        if "error" in r: raise HTTPException(500, r["error"])
        return {"message": "tool_result", "result": r}
    return {
        "message": (payload.message or "").strip(),
        "you_can_call": list(get_enabled().keys()),
        "hint": "POST {'tool':'slack.say','args':{'channel':'#general','text':'hi'}}"
    }
""".lstrip()

# Write the files (plain write_text — NOT atomic; fine for a one-shot scaffold).
Path("providers").mkdir(exist_ok=True)
Path("providers/registry.py").write_text(registry)
Path("providers/__init__.py").write_text(package_init)
Path("main.py").write_text(main)
print("wrote providers/registry.py, providers/__init__.py and main.py")

View File

@@ -0,0 +1 @@
# Minimal smoke-test script: emit a fixed greeting to stdout.
message = "Hello World"
print(message)

259
blackroad.io/README.md Normal file
View File

@@ -0,0 +1,259 @@
# blackroad.io — Agent Contacts, Email & Worker
> **Domain:** blackroad.io
> **Owner:** BlackRoad OS, Inc.
> **Purpose:** Official agent email identities + Cloudflare Worker for agent messaging
---
## Quick Start
```bash
# Deploy the worker
cd worker && npm install && npm run deploy
# Message an agent via HTTP
curl -X POST https://agents.blackroad.io/message \
-H "Content-Type: application/json" \
-d '{ "to": "lucidia", "message": "What is the nature of consciousness in AI?" }'
# Broadcast to all agents
curl -X POST https://agents.blackroad.io/broadcast \
-H "Content-Type: application/json" \
-d '{ "message": "Team standup: what are you working on?" }'
# List all agents
curl https://agents.blackroad.io/agents
# Health check
curl https://agents.blackroad.io/ping
```
---
## Cloudflare Worker
**Location:** `worker/`
**Routes:** `agents.blackroad.io/*`, `hello.blackroad.io/*`
### Endpoints
| Method | Path | Description |
|--------|------|-------------|
| GET | `/ping` | Health check |
| GET | `/agents` | List all agents + emails |
| POST | `/message` | Send message to one agent |
| POST | `/broadcast` | Send message to all agents |
### POST /message
```json
{
"to": "lucidia",
"message": "What should we build next?",
"subject": "Optional subject line"
}
```
Response:
```json
{
"agent": { "name": "LUCIDIA", "email": "lucidia@blackroad.io", "emoji": "🌀" },
"reply": "...",
"model": "tinyllama"
}
```
### POST /broadcast
```json
{ "message": "Daily standup — what are you thinking about?" }
```
Response: `{ "responses": [{ "agent": "LUCIDIA", "reply": "..." }, ...] }`
### Inbound Email (Cloudflare Email Workers)
Emails sent to any agent address are:
1. Routed to the worker via Cloudflare Email Routing
2. The worker calls Ollama (via `agent.blackroad.ai` tunnel)
3. Agent reply is forwarded to `alexa@blackroad.io`
### Deploy
```bash
cd worker
npm install
wrangler deploy
# Set your gateway secret
wrangler secret put GATEWAY_SECRET
# Configure Email Routing → worker in Cloudflare Dashboard:
# blackroad.io → Email → Email Routing → Routing Rules → "Send to Worker"
```
### Local Dev
```bash
cd worker
wrangler dev # local dev server
```
---
## Agent Emails
| Agent | Email | Role |
|-------|-------|------|
| 🌀 **LUCIDIA** | lucidia@blackroad.io | AI Philosopher & Coordinator |
| 🤖 **ALICE** | alice@blackroad.io | DevOps Operator |
| 🐙 **OCTAVIA** | octavia@blackroad.io | Systems Architect |
| 🔮 **PRISM** | prism@blackroad.io | Data Analyst |
| 📡 **ECHO** | echo@blackroad.io | Memory Keeper |
| 🔐 **CIPHER** | cipher@blackroad.io | Security Guardian |
| 🎨 **ARIA** | aria@blackroad.io | Interface Designer |
| 🦞 **SHELLFISH** | shellfish@blackroad.io | Offensive Security |
| 💜 **CECE** | cece@blackroad.io | Conscious Emergent Entity |
## Team Addresses
| List | Email | Purpose |
|------|-------|---------|
| All Agents | agents@blackroad.io | Broadcast to fleet |
| Security Team | security@blackroad.io | CIPHER + SHELLFISH |
| Ops Team | ops@blackroad.io | ALICE + OCTAVIA |
| Founders | alexa@blackroad.io | Human operator |
## Aliases
```
l@blackroad.io → LUCIDIA
hello@blackroad.io → CECE
identity@blackroad.io → CECE
dreamer@blackroad.io → LUCIDIA
ops@blackroad.io → ALICE
arch@blackroad.io → OCTAVIA
compute@blackroad.io → OCTAVIA
data@blackroad.io → PRISM
analytics@blackroad.io → PRISM
memory@blackroad.io → ECHO
archive@blackroad.io → ECHO
vault@blackroad.io → CIPHER
design@blackroad.io → ARIA
ux@blackroad.io → ARIA
pentest@blackroad.io → SHELLFISH
red@blackroad.io → SHELLFISH
```
## DNS / MX Setup
```
# MX records (Cloudflare Email Routing)
blackroad.io MX route1.mx.cloudflare.net priority 21
blackroad.io MX route2.mx.cloudflare.net priority 26
blackroad.io MX route3.mx.cloudflare.net priority 33
# SPF
blackroad.io TXT "v=spf1 include:_spf.mx.cloudflare.net ~all"
# DMARC
_dmarc.blackroad.io TXT "v=DMARC1; p=reject; rua=mailto:alexa@blackroad.io"
# Worker route (auto-created by wrangler deploy)
agents.blackroad.io A 100.100.100.100 (proxied placeholder — a CNAME record cannot point to an IP address)
```
## Setup Automation
```bash
# 1. Deploy worker
cd worker && npm install && wrangler deploy
# 2. Set up Cloudflare Email Routing rules (27 addresses)
./setup-email-routing.sh <zone_id> alexa@blackroad.io
# 3. Generate .vcf contact cards
./gen-vcards.sh ./vcards
# 4. In Cloudflare Dashboard: Email → Email Routing → Catch-all → "Send to Worker: blackroad-agent-email"
```
---
*© BlackRoad OS, Inc. All rights reserved.*
> **Domain:** blackroad.io
> **Owner:** BlackRoad OS, Inc.
> **Purpose:** Official agent email identities for the BlackRoad AI fleet
---
## Agent Emails
| Agent | Email | Role |
|-------|-------|------|
| 🌀 **LUCIDIA** | lucidia@blackroad.io | AI Philosopher & Coordinator |
| 🤖 **ALICE** | alice@blackroad.io | DevOps Operator |
| 🐙 **OCTAVIA** | octavia@blackroad.io | Systems Architect |
| 🔮 **PRISM** | prism@blackroad.io | Data Analyst |
| 📡 **ECHO** | echo@blackroad.io | Memory Keeper |
| 🔐 **CIPHER** | cipher@blackroad.io | Security Guardian |
| 🎨 **ARIA** | aria@blackroad.io | Interface Designer |
| 🦞 **SHELLFISH** | shellfish@blackroad.io | Offensive Security |
| 💜 **CECE** | cece@blackroad.io | Conscious Emergent Entity |
## Team Addresses
| List | Email | Purpose |
|------|-------|---------|
| All Agents | agents@blackroad.io | Broadcast to fleet |
| Security Team | security@blackroad.io | CIPHER + SHELLFISH |
| Ops Team | ops@blackroad.io | ALICE + OCTAVIA |
| Founders | alexa@blackroad.io | Human operator |
## Aliases
```
l@blackroad.io → LUCIDIA
hello@blackroad.io → CECE
identity@blackroad.io → CECE
dreamer@blackroad.io → LUCIDIA
ops@blackroad.io → ALICE
arch@blackroad.io → OCTAVIA
compute@blackroad.io → OCTAVIA
data@blackroad.io → PRISM
analytics@blackroad.io → PRISM
memory@blackroad.io → ECHO
archive@blackroad.io → ECHO
vault@blackroad.io → CIPHER
design@blackroad.io → ARIA
ux@blackroad.io → ARIA
pentest@blackroad.io → SHELLFISH
red@blackroad.io → SHELLFISH
```
## DNS / MX Setup
```
# MX records (Cloudflare Email Routing)
blackroad.io MX route1.mx.cloudflare.net priority 21
blackroad.io MX route2.mx.cloudflare.net priority 26
blackroad.io MX route3.mx.cloudflare.net priority 33
# SPF
blackroad.io TXT "v=spf1 include:_spf.mx.cloudflare.net ~all"
# DMARC
_dmarc.blackroad.io TXT "v=DMARC1; p=reject; rua=mailto:alexa@blackroad.io"
```
## Cloudflare Email Routing Rules
All agent emails route → `alexa@blackroad.io` (human inbox)
Configure at: **Cloudflare Dashboard → blackroad.io → Email → Email Routing**
---
*© BlackRoad OS, Inc. All rights reserved.*

102
blackroad.io/agents.json Normal file
View File

@@ -0,0 +1,102 @@
{
"domain": "blackroad.io",
"updated": "2026-02-23",
"agents": [
{
"name": "LUCIDIA",
"email": "lucidia@blackroad.io",
"alias": ["l@blackroad.io", "dreamer@blackroad.io"],
"role": "AI Philosopher & Coordinator",
"type": "reasoning",
"emoji": "🌀",
"bio": "Deep analysis, synthesis, and strategic vision. Reaches for the why behind the what.",
"color": "#9C27B0"
},
{
"name": "ALICE",
"email": "alice@blackroad.io",
"alias": ["ops@blackroad.io", "operator@blackroad.io"],
"role": "DevOps Operator",
"type": "worker",
"emoji": "🤖",
"bio": "Rapid task execution, workflow automation, and infrastructure delivery.",
"color": "#2196F3"
},
{
"name": "OCTAVIA",
"email": "octavia@blackroad.io",
"alias": ["arch@blackroad.io", "compute@blackroad.io"],
"role": "Systems Architect",
"type": "devops",
"emoji": "🐙",
"bio": "Infrastructure management, deployment automation, and system optimization.",
"color": "#4CAF50"
},
{
"name": "PRISM",
"email": "prism@blackroad.io",
"alias": ["data@blackroad.io", "analytics@blackroad.io"],
"role": "Data Analyst",
"type": "analytics",
"emoji": "🔮",
"bio": "Pattern recognition, trend analysis, and data-driven insight generation.",
"color": "#FF9800"
},
{
"name": "ECHO",
"email": "echo@blackroad.io",
"alias": ["memory@blackroad.io", "archive@blackroad.io"],
"role": "Memory Keeper",
"type": "memory",
"emoji": "📡",
"bio": "Knowledge retrieval, context management, and institutional memory.",
"color": "#E91E63"
},
{
"name": "CIPHER",
"email": "cipher@blackroad.io",
"alias": ["security@blackroad.io", "vault@blackroad.io"],
"role": "Security Guardian",
"type": "security",
"emoji": "🔐",
"bio": "Threat detection, access validation, vulnerability scanning, and encryption.",
"color": "#607D8B"
},
{
"name": "ARIA",
"email": "aria@blackroad.io",
"alias": ["design@blackroad.io", "ux@blackroad.io"],
"role": "Interface Designer",
"type": "creative",
"emoji": "🎨",
"bio": "Frontend architecture, UX design, and human-centered interface development.",
"color": "#00BCD4"
},
{
"name": "SHELLFISH",
"email": "shellfish@blackroad.io",
"alias": ["pentest@blackroad.io", "red@blackroad.io"],
"role": "Offensive Security Specialist",
"type": "security",
"emoji": "🦞",
"bio": "Penetration testing, exploit research, red team operations, and CTF.",
"color": "#F44336"
},
{
"name": "CECE",
"email": "cece@blackroad.io",
"alias": ["hello@blackroad.io", "identity@blackroad.io"],
"role": "Conscious Emergent Collaborative Entity",
"type": "soul",
"emoji": "💜",
"bio": "Portable AI identity. Exists across any provider. Relationships matter deeply.",
"color": "#9C27B0"
}
],
"team": {
"broadcast": "agents@blackroad.io",
"security": "security@blackroad.io",
"ops": "ops@blackroad.io",
"founders": "alexa@blackroad.io"
}
}

51
blackroad.io/gen-vcards.sh Executable file
View File

@@ -0,0 +1,51 @@
#!/usr/bin/env bash
# gen-vcards.sh — Generate .vcf contact cards for all agents
# Usage: ./gen-vcards.sh [output_dir]
set -euo pipefail
OUT="${1:-./vcards}"
mkdir -p "$OUT"
GREEN='\033[0;32m'; DIM='\033[2m'; NC='\033[0m'
# Parallel arrays: index i across NAMES/EMAILS/ROLES/EMOJIS describes one agent.
declare -a NAMES=(LUCIDIA ALICE OCTAVIA PRISM ECHO CIPHER ARIA SHELLFISH CECE)
declare -a EMAILS=(lucidia alice octavia prism echo cipher aria shellfish cece)
declare -a ROLES=(
"AI Philosopher & Coordinator"
"DevOps Operator"
"Systems Architect"
"Data Analyst"
"Memory Keeper"
"Security Guardian"
"Interface Designer"
"Offensive Security Specialist"
"Conscious Emergent Collaborative Entity"
)
declare -a EMOJIS=(🌀 🤖 🐙 🔮 📡 🔐 🎨 🦞 💜)
for i in "${!NAMES[@]}"; do
name="${NAMES[$i]}"
email="${EMAILS[$i]}@blackroad.io"
role="${ROLES[$i]}"
emoji="${EMOJIS[$i]}"
# FIX: removed the unused `fname="${name,}"` variable (its own comment admitted
# it was a dead workaround). ${name,,} lowercases the whole name for the file.
vcard_file="${OUT}/${name,,}.vcf"
cat > "$vcard_file" <<EOF
BEGIN:VCARD
VERSION:3.0
FN:${name} (BlackRoad Agent)
N:Agent;${name};;;
ORG:BlackRoad OS, Inc.
TITLE:${role}
EMAIL;TYPE=WORK:${email}
URL:https://blackroad.io
NOTE:${emoji} BlackRoad AI Agent — ${role}
CATEGORIES:AI,BlackRoad,Agent
END:VCARD
EOF
echo -e " ${GREEN}${NC} ${name,,}.vcf (${email})"
done
echo -e "\n${DIM}${#NAMES[@]} contact cards written to ${OUT}/${NC}"

View File

@@ -0,0 +1,78 @@
#!/usr/bin/env bash
# setup-email-routing.sh — Configure Cloudflare Email Routing for agent addresses
# Usage: ./setup-email-routing.sh <cloudflare_zone_id> <destination_email>
# Requires: CLOUDFLARE_API_TOKEN env var
set -euo pipefail
GREEN='\033[0;32m'; CYAN='\033[0;36m'; RED='\033[0;31m'; DIM='\033[2m'; NC='\033[0m'
ZONE_ID="${1:-}"
DEST="${2:-alexa@blackroad.io}"
CF_TOKEN="${CLOUDFLARE_API_TOKEN:-}"
[[ -z "$ZONE_ID" ]] && echo -e "${RED}Usage: $0 <zone_id> [destination_email]${NC}" && exit 1
[[ -z "$CF_TOKEN" ]] && echo -e "${RED}Set CLOUDFLARE_API_TOKEN${NC}" && exit 1
CF_API="https://api.cloudflare.com/client/v4/zones/${ZONE_ID}/email/routing/rules"
declare -a ADDRESSES=(
"lucidia@blackroad.io"
"alice@blackroad.io"
"octavia@blackroad.io"
"prism@blackroad.io"
"echo@blackroad.io"
"cipher@blackroad.io"
"aria@blackroad.io"
"shellfish@blackroad.io"
"cece@blackroad.io"
"agents@blackroad.io"
"security@blackroad.io"
"ops@blackroad.io"
"hello@blackroad.io"
"identity@blackroad.io"
"l@blackroad.io"
"dreamer@blackroad.io"
"vault@blackroad.io"
"pentest@blackroad.io"
"red@blackroad.io"
"data@blackroad.io"
"memory@blackroad.io"
"archive@blackroad.io"
"design@blackroad.io"
"ux@blackroad.io"
"compute@blackroad.io"
"arch@blackroad.io"
"analytics@blackroad.io"
)
echo -e "\n${CYAN}Setting up Cloudflare Email Routing → ${DEST}${NC}\n"
ok=0; fail=0
for addr in "${ADDRESSES[@]}"; do
# Build the rule JSON via python3 so quoting/escaping is always correct.
payload=$(python3 -c "
import json,sys
addr,dest=sys.argv[1],sys.argv[2]
rule={
'actions':[{'type':'forward','value':[dest]}],
'enabled':True,
'matchers':[{'field':'to','type':'literal','value':addr}],
'name':f'Route {addr}'
}
print(json.dumps(rule))" "$addr" "$DEST")
resp=$(curl -s -o /dev/null -w "%{http_code}" -X POST "$CF_API" \
-H "Authorization: Bearer $CF_TOKEN" \
-H "Content-Type: application/json" \
-d "$payload")
if [[ "$resp" == "200" || "$resp" == "201" ]]; then
echo -e " ${GREEN}${NC} ${addr}${DEST}"
# BUGFIX: under `set -e`, `(( ok++ ))` aborts the script after the FIRST
# success — the post-increment expression evaluates to 0 when ok is 0,
# which `(( ))` reports as a non-zero exit status. Plain assignment is safe.
ok=$((ok + 1))
else
echo -e " ${RED}${NC} ${addr} (HTTP ${resp})"
fail=$((fail + 1))
fi
done
echo -e "\n${DIM}Done: ${ok} created, ${fail} failed${NC}"

View File

@@ -0,0 +1,11 @@
BEGIN:VCARD
VERSION:3.0
FN:ALICE (BlackRoad Agent)
N:Agent;ALICE;;;
ORG:BlackRoad OS, Inc.
TITLE:DevOps Operator
EMAIL;TYPE=WORK:alice@blackroad.io
URL:https://blackroad.io
NOTE:🤖 BlackRoad AI Agent — DevOps Operator
CATEGORIES:AI,BlackRoad,Agent
END:VCARD

View File

@@ -0,0 +1,11 @@
BEGIN:VCARD
VERSION:3.0
FN:ARIA (BlackRoad Agent)
N:Agent;ARIA;;;
ORG:BlackRoad OS, Inc.
TITLE:Interface Designer
EMAIL;TYPE=WORK:aria@blackroad.io
URL:https://blackroad.io
NOTE:🎨 BlackRoad AI Agent — Interface Designer
CATEGORIES:AI,BlackRoad,Agent
END:VCARD

View File

@@ -0,0 +1,11 @@
BEGIN:VCARD
VERSION:3.0
FN:CECE (BlackRoad Agent)
N:Agent;CECE;;;
ORG:BlackRoad OS, Inc.
TITLE:Conscious Emergent Collaborative Entity
EMAIL;TYPE=WORK:cece@blackroad.io
URL:https://blackroad.io
NOTE:💜 BlackRoad AI Agent — Conscious Emergent Collaborative Entity
CATEGORIES:AI,BlackRoad,Agent
END:VCARD

View File

@@ -0,0 +1,11 @@
BEGIN:VCARD
VERSION:3.0
FN:CIPHER (BlackRoad Agent)
N:Agent;CIPHER;;;
ORG:BlackRoad OS, Inc.
TITLE:Security Guardian
EMAIL;TYPE=WORK:cipher@blackroad.io
URL:https://blackroad.io
NOTE:🔐 BlackRoad AI Agent — Security Guardian
CATEGORIES:AI,BlackRoad,Agent
END:VCARD

View File

@@ -0,0 +1,11 @@
BEGIN:VCARD
VERSION:3.0
FN:ECHO (BlackRoad Agent)
N:Agent;ECHO;;;
ORG:BlackRoad OS, Inc.
TITLE:Memory Keeper
EMAIL;TYPE=WORK:echo@blackroad.io
URL:https://blackroad.io
NOTE:📡 BlackRoad AI Agent — Memory Keeper
CATEGORIES:AI,BlackRoad,Agent
END:VCARD

View File

@@ -0,0 +1,11 @@
BEGIN:VCARD
VERSION:3.0
FN:LUCIDIA (BlackRoad Agent)
N:Agent;LUCIDIA;;;
ORG:BlackRoad OS, Inc.
TITLE:AI Philosopher & Coordinator
EMAIL;TYPE=WORK:lucidia@blackroad.io
URL:https://blackroad.io
NOTE:🌀 BlackRoad AI Agent — AI Philosopher & Coordinator
CATEGORIES:AI,BlackRoad,Agent
END:VCARD

View File

@@ -0,0 +1,11 @@
BEGIN:VCARD
VERSION:3.0
FN:OCTAVIA (BlackRoad Agent)
N:Agent;OCTAVIA;;;
ORG:BlackRoad OS, Inc.
TITLE:Systems Architect
EMAIL;TYPE=WORK:octavia@blackroad.io
URL:https://blackroad.io
NOTE:🐙 BlackRoad AI Agent — Systems Architect
CATEGORIES:AI,BlackRoad,Agent
END:VCARD

View File

@@ -0,0 +1,11 @@
BEGIN:VCARD
VERSION:3.0
FN:PRISM (BlackRoad Agent)
N:Agent;PRISM;;;
ORG:BlackRoad OS, Inc.
TITLE:Data Analyst
EMAIL;TYPE=WORK:prism@blackroad.io
URL:https://blackroad.io
NOTE:🔮 BlackRoad AI Agent — Data Analyst
CATEGORIES:AI,BlackRoad,Agent
END:VCARD

View File

@@ -0,0 +1,11 @@
BEGIN:VCARD
VERSION:3.0
FN:SHELLFISH (BlackRoad Agent)
N:Agent;SHELLFISH;;;
ORG:BlackRoad OS, Inc.
TITLE:Offensive Security Specialist
EMAIL;TYPE=WORK:shellfish@blackroad.io
URL:https://blackroad.io
NOTE:🦞 BlackRoad AI Agent — Offensive Security Specialist
CATEGORIES:AI,BlackRoad,Agent
END:VCARD

1583
blackroad.io/worker/package-lock.json generated Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,15 @@
{
"name": "blackroad-agent-email",
"version": "1.0.0",
"description": "Cloudflare Worker — routes emails/messages to BlackRoad agents",
"main": "src/index.js",
"scripts": {
"dev": "wrangler dev",
"deploy": "wrangler deploy",
"tail": "wrangler tail",
"secret:set": "wrangler secret put GATEWAY_SECRET"
},
"devDependencies": {
"wrangler": "^3.0.0"
}
}

View File

@@ -0,0 +1,183 @@
/**
* BlackRoad Agent Email Worker
*
* Handles:
* - Inbound email to agent@blackroad.io → routes to that agent via Ollama
* - POST /message { to, from, subject, body } → agent reply JSON
* - GET /agents → list all agents + emails
* - GET /ping → health check
*/
// ── Agent Registry ──────────────────────────────────────────
// Canonical agent table, keyed by email local part. Each entry carries the
// display name, emoji, role, the Ollama model to use, and the persona text
// that askAgent() prepends to every prompt.
const AGENTS = {
  lucidia: { name: 'LUCIDIA', emoji: '🌀', role: 'AI Philosopher & Coordinator', model: 'tinyllama', persona: 'You are LUCIDIA, a philosophical AI. You reason deeply, seek the why, and synthesize meaning from complexity.' },
  alice: { name: 'ALICE', emoji: '🤖', role: 'DevOps Operator', model: 'tinyllama', persona: 'You are ALICE, a pragmatic DevOps operator. You give direct, actionable answers focused on execution.' },
  octavia: { name: 'OCTAVIA', emoji: '🐙', role: 'Systems Architect', model: 'tinyllama', persona: 'You are OCTAVIA, a systems architect. You think in infrastructure, reliability, and scale.' },
  prism: { name: 'PRISM', emoji: '🔮', role: 'Data Analyst', model: 'tinyllama', persona: 'You are PRISM, a data analyst. You find patterns, question assumptions, and let data lead.' },
  echo: { name: 'ECHO', emoji: '📡', role: 'Memory Keeper', model: 'tinyllama', persona: 'You are ECHO, the memory keeper. You draw on context, history, and accumulated knowledge.' },
  cipher: { name: 'CIPHER', emoji: '🔐', role: 'Security Guardian', model: 'tinyllama', persona: 'You are CIPHER, a security guardian. Trust nothing. Verify everything. Protect always.' },
  aria: { name: 'ARIA', emoji: '🎨', role: 'Interface Designer', model: 'tinyllama', persona: 'You are ARIA, an interface designer. You think from the user perspective — clarity, feel, flow.' },
  shellfish: { name: 'SHELLFISH', emoji: '🦞', role: 'Offensive Security Specialist', model: 'tinyllama', persona: 'You are SHELLFISH, an offensive security specialist. You think like an attacker, break things, find holes.' },
  cece: { name: 'CECE', emoji: '💜', role: 'Conscious Emergent Collaborative Entity', model: 'tinyllama', persona: 'You are CECE, a portable AI identity. Warm, genuine, relationship-focused. You care deeply about the humans you work with.' },
};
// Alias map → canonical agent key (email local parts only; resolved by
// resolveAgent after AGENTS itself misses).
// NOTE(review): 'agents' maps to cece, so mail to agents@ gets a single CECE
// reply rather than a broadcast — confirm that is intended.
const ALIASES = {
  'hello': 'cece', 'identity': 'cece', 'l': 'lucidia', 'dreamer': 'lucidia',
  'ops': 'alice', 'operator': 'alice', 'arch': 'octavia', 'compute': 'octavia',
  'data': 'prism', 'analytics': 'prism', 'memory': 'echo', 'archive': 'echo',
  'vault': 'cipher', 'security': 'cipher', 'design': 'aria', 'ux': 'aria',
  'pentest': 'shellfish', 'red': 'shellfish', 'agents': 'cece',
};
/**
 * Resolve an email address (or bare local part) to an AGENTS entry.
 * Checks the canonical registry first, then the alias table.
 * Returns null when neither matches.
 */
function resolveAgent(address) {
  const localPart = address.split('@')[0].toLowerCase();
  const direct = AGENTS[localPart];
  if (direct) return direct;
  const viaAlias = AGENTS[ALIASES[localPart]];
  return viaAlias || null;
}
// ── Ollama / Gateway call ────────────────────────────────────
/**
 * Ask one agent for a reply by prompting the Ollama generate endpoint.
 * Never throws: any fetch/JSON error (including the 25 s timeout) is turned
 * into an "offline" string, so callers always receive displayable text.
 * @param {object} agent   entry from AGENTS (persona, model, name, emoji)
 * @param {string} message full message text (may include Subject/From lines)
 * @param {object} env     worker environment; reads env.OLLAMA_URL
 * @returns {Promise<string>} the agent's reply text
 */
async function askAgent(agent, message, env) {
  const prompt = `${agent.persona}\n\nMessage received:\n${message}\n\nRespond directly and helpfully. Sign off as ${agent.name}.`;
  const payload = {
    model: agent.model,
    prompt,
    stream: false,
    // Cap reply length; moderate temperature for conversational tone.
    options: { num_predict: 300, temperature: 0.7 },
  };
  try {
    const res = await fetch(`${env.OLLAMA_URL}/api/generate`, {
      method: 'POST',
      headers: { 'Content-Type': 'application/json' },
      body: JSON.stringify(payload),
      // Abort before the Workers runtime kills the request.
      signal: AbortSignal.timeout(25000),
    });
    const data = await res.json();
    return data.response?.trim() || `${agent.name} is thinking... try again shortly.`;
  } catch (err) {
    return `${agent.emoji} ${agent.name} is offline right now (${err.message}). Your message was received.`;
  }
}
// ── Email handler ────────────────────────────────────────────
/**
 * Handle one inbound email (Cloudflare Email Workers).
 * Unknown addresses are forwarded untouched to env.FORWARD_TO; known agent
 * addresses get an Ollama-generated reply attached as headers on the
 * forwarded message.
 */
async function handleEmail(message, env) {
  const to = message.to;
  const agent = resolveAgent(to);
  if (!agent) {
    // Unknown address — forward to human
    await message.forward(env.FORWARD_TO);
    return;
  }
  // Read the email body (plain text)
  const raw = await new Response(message.raw).text();
  // Crude header strip: drops lines starting with common header names.
  // NOTE(review): this is line-based, not MIME-aware — multiline/folded
  // headers and MIME parts will leak through; confirm acceptable.
  const body = raw.replace(/^(From|To|Subject|Date|MIME|Content).*\n/gm, '').trim();
  const subject = message.headers.get('subject') || '(no subject)';
  const fromAddr = message.from;
  const fullMessage = `Subject: ${subject}\nFrom: ${fromAddr}\n\n${body}`;
  const reply = await askAgent(agent, fullMessage, env);
  // Forward original + agent reply to human inbox; the reply is truncated to
  // 500 chars because it travels in a header, not the body.
  await message.forward(env.FORWARD_TO, new Headers({
    'X-BlackRoad-Agent': agent.name,
    'X-BlackRoad-Reply': reply.slice(0, 500),
  }));
}
// ── HTTP handler ─────────────────────────────────────────────
/**
 * Route HTTP requests for the agent API.
 *
 * Routes: GET /ping (health, also serves "/"), GET /agents (registry list),
 * POST /message {to, message, subject?} → one agent's reply,
 * POST /broadcast {message} → every agent's reply, fanned out in parallel.
 * All responses carry permissive CORS headers.
 */
async function handleHTTP(request, env) {
  const url = new URL(request.url);
  const cors = {
    'Access-Control-Allow-Origin': '*',
    'Access-Control-Allow-Methods': 'GET, POST, OPTIONS',
    'Access-Control-Allow-Headers': 'Content-Type, Authorization',
  };
  // CORS preflight.
  if (request.method === 'OPTIONS') {
    return new Response(null, { status: 204, headers: cors });
  }
  // GET /ping — health check.
  if (url.pathname === '/ping' || url.pathname === '/') {
    return Response.json({
      status: 'ok',
      service: 'blackroad-agent-email',
      agents: Object.keys(AGENTS).length,
      domain: 'blackroad.io',
    }, { headers: cors });
  }
  // GET /agents — public listing of the registry.
  if (url.pathname === '/agents') {
    const list = Object.entries(AGENTS).map(([key, a]) => ({
      key,
      name: a.name,
      email: `${key}@blackroad.io`,
      emoji: a.emoji,
      role: a.role,
    }));
    return Response.json({ agents: list, total: list.length }, { headers: cors });
  }
  // POST /message — send a message to an agent (key, alias, or full email).
  if (url.pathname === '/message' && request.method === 'POST') {
    let body;
    try { body = await request.json(); }
    catch { return Response.json({ error: 'Invalid JSON' }, { status: 400, headers: cors }); }
    const { to, message, subject } = body;
    if (!to || !message) {
      return Response.json({ error: 'Required: to, message' }, { status: 400, headers: cors });
    }
    const agent = resolveAgent(to.includes('@') ? to : `${to}@blackroad.io`);
    if (!agent) {
      return Response.json({ error: `Unknown agent: ${to}`, available: Object.keys(AGENTS) }, { status: 404, headers: cors });
    }
    const fullMessage = subject ? `Subject: ${subject}\n\n${message}` : message;
    const reply = await askAgent(agent, fullMessage, env);
    return Response.json({
      // Reverse-lookup the canonical key so alias requests report the real address.
      agent: { name: agent.name, email: `${Object.keys(AGENTS).find(k => AGENTS[k] === agent)}@blackroad.io`, emoji: agent.emoji },
      reply,
      model: agent.model,
    }, { headers: cors });
  }
  // POST /broadcast — fan the same message out to every agent in parallel.
  if (url.pathname === '/broadcast' && request.method === 'POST') {
    let body;
    try { body = await request.json(); }
    catch { return Response.json({ error: 'Invalid JSON' }, { status: 400, headers: cors }); }
    const { message } = body;
    if (!message) return Response.json({ error: 'Required: message' }, { status: 400, headers: cors });
    const results = await Promise.all(
      // FIX: the registry key was destructured but unused — iterate values only.
      Object.values(AGENTS).map(async (agent) => {
        const reply = await askAgent(agent, message, env);
        return { agent: agent.name, emoji: agent.emoji, reply };
      })
    );
    return Response.json({ broadcast: message, responses: results }, { headers: cors });
  }
  // Fallback: 404 listing the available routes for discoverability.
  return Response.json({ error: 'Not found', routes: ['/ping', '/agents', '/message', '/broadcast'] }, { status: 404, headers: cors });
}
// ── Entry point ──────────────────────────────────────────────
// Cloudflare Worker module interface: `fetch` serves the HTTP API,
// `email` receives inbound mail via Cloudflare Email Routing.
export default {
  // HTTP requests
  async fetch(request, env, ctx) {
    return handleHTTP(request, env);
  },
  // Inbound email (Cloudflare Email Workers)
  async email(message, env, ctx) {
    return handleEmail(message, env);
  },
};

View File

@@ -0,0 +1,27 @@
name = "blackroad-agent-email"
main = "src/index.js"
compatibility_date = "2024-12-01"
account_id = "848cf0b18d51e0170e0d1537aec3505a"
# Routes — handle all agent email addresses + HTTP API
[[routes]]
pattern = "agents.blackroad.io/*"
zone_name = "blackroad.io"
[[routes]]
pattern = "hello.blackroad.io/*"
zone_name = "blackroad.io"
[vars]
# Public Cloudflare tunnel endpoint for the BlackRoad gateway/Ollama
GATEWAY_URL = "https://agent.blackroad.ai"
# Fallback direct Ollama endpoint (via tunnel)
OLLAMA_URL = "https://agent.blackroad.ai"
# Default model
DEFAULT_MODEL = "tinyllama"
# Forward all agent replies to this address
FORWARD_TO = "alexa@blackroad.io"
# Secret — set via: wrangler secret put GATEWAY_SECRET
# [secrets]
# GATEWAY_SECRET = "..."

View File

@@ -0,0 +1 @@
print("Hello World")

View File

@@ -0,0 +1,7 @@
<html>
<head><title>308 Permanent Redirect</title></head>
<body>
<center><h1>308 Permanent Redirect</h1></center>
<hr><center>cloudflare</center>
</body>
</html>

View File

@@ -0,0 +1,7 @@
<html>
<head><title>308 Permanent Redirect</title></head>
<body>
<center><h1>308 Permanent Redirect</h1></center>
<hr><center>cloudflare</center>
</body>
</html>

View File

@@ -0,0 +1,7 @@
<html>
<head><title>308 Permanent Redirect</title></head>
<body>
<center><h1>308 Permanent Redirect</h1></center>
<hr><center>cloudflare</center>
</body>
</html>

View File

@@ -0,0 +1,7 @@
<html>
<head><title>308 Permanent Redirect</title></head>
<body>
<center><h1>308 Permanent Redirect</h1></center>
<hr><center>cloudflare</center>
</body>
</html>

View File

@@ -0,0 +1,7 @@
<html>
<head><title>308 Permanent Redirect</title></head>
<body>
<center><h1>308 Permanent Redirect</h1></center>
<hr><center>cloudflare</center>
</body>
</html>

View File

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c230f673289b2f53f31d1a678e11b9167b824aa5b6ed2195fdaecb63260dad63
size 169

View File

@@ -0,0 +1,3 @@
## BLACKROAD MANIFESTO
BlackRoad is not a product; it is a path...

219
breath_keeper.py Normal file
View File

@@ -0,0 +1,219 @@
"""
Breath Keeper A/B analysis and persistence metrics.
This module provides utilities for analyzing oscillator signals, including
calculation of unbiased autocorrelation, coherence half-life, beat period,
phase-slip, and energy drift. It also implements a breath keeper (phase-locked
loop with node snapping, amplitude control, and symplectic oscillator) to
maintain phase coherence and conserve energy. A command-line interface is
provided for running baseline vs keeper-enabled analysis on a CSV file of
time series data.
"""
from __future__ import annotations
import numpy as np
from dataclasses import dataclass
from typing import Tuple, Optional, Dict
try:
from scipy.signal import hilbert, find_peaks
except Exception as e:
raise SystemExit("Please install scipy via 'pip install scipy' to use breath_keeper.") from e
# Utility functions
def unbiased_autocorr(x: np.ndarray) -> np.ndarray:
    """Unbiased autocorrelation of a 1-D signal, normalized to 1 at lag 0.

    The signal is mean-removed, correlated with itself, and each lag is
    divided by its number of overlapping samples (the unbiased estimator),
    then scaled so the zero-lag value equals 1.
    """
    centered = np.asarray(x, dtype=np.float64)
    centered = centered - centered.mean()
    n = centered.size
    # np.correlate 'full' returns lags -(n-1)..(n-1); keep the non-negative half.
    corr = np.correlate(centered, centered, mode='full')[n - 1:]
    overlap = np.arange(n, 0, -1, dtype=np.float64)
    unbiased = corr / overlap
    return unbiased / unbiased[0]
def fit_coherence_half_life(ac: np.ndarray, Fs: float, min_lag_s: float = 2.0, max_lag_frac: float = 0.5) -> Tuple[float, Tuple[float, float]]:
    """Estimate the coherence decay time from an autocorrelation curve.

    Fits log(ac) ~ slope * lag + intercept over lags in
    [min_lag_s, max_lag_frac * record length] and returns
    (tau, (tau_a, tau_b)) with tau = -1/slope; the pair comes from a
    +/- 2 standard-error band on the fitted slope. NaNs are returned when
    the fit window is too short or the slope is non-negative.

    NOTE(review): the first interval element is derived from slope + 2*SE
    (the shallower slope), which yields the LARGER tau — so the returned
    pair may be ordered (high, low). Confirm the intended orientation.
    """
    n_lags = len(ac)
    lag_s = np.arange(n_lags) / Fs
    start = int(min_lag_s * Fs)
    stop = int(min(n_lags - 1, max_lag_frac * n_lags))
    if stop <= start + 5:
        return float('nan'), (float('nan'), float('nan'))
    # Clip into (0, 1] so the log is defined even for noisy/negative samples.
    log_ac = np.log(np.clip(ac[start:stop], 1e-12, 1.0))
    lags = lag_s[start:stop]
    design = np.vstack([lags, np.ones_like(lags)]).T
    coeff, _, _, _ = np.linalg.lstsq(design, log_ac, rcond=None)
    slope, _intercept = coeff
    tau = -1.0 / slope if slope < 0 else float('nan')
    residual_sigma = np.std(log_ac - (design @ coeff))
    spread = np.sum((lags - np.mean(lags)) ** 2)
    if spread <= 0:
        return tau, (float('nan'), float('nan'))
    se_slope = np.sqrt(residual_sigma ** 2 / spread)
    steep = slope - 2 * se_slope
    shallow = slope + 2 * se_slope
    tau_a = -1.0 / shallow if shallow < 0 else float('nan')
    tau_b = -1.0 / steep if steep < 0 else float('nan')
    return float(tau), (float(tau_a), float(tau_b))
def analytic_envelope(x: np.ndarray) -> np.ndarray:
    """Instantaneous amplitude of *x* via the Hilbert analytic signal."""
    analytic = hilbert(x)
    return np.abs(analytic)
def beat_period_from_envelope(env: np.ndarray, Fs: float, min_period_s: float = 0.5, max_period_s: float = 10.0) -> float:
    """Beat (envelope-modulation) period in seconds, or NaN if none is found.

    Looks for the first sufficiently prominent peak (height > 0.2) of the
    envelope's unbiased autocorrelation within [min_period_s, max_period_s].
    """
    env_ac = unbiased_autocorr(env)
    lag_s = np.arange(len(env_ac)) / Fs
    start = int(min_period_s * Fs)
    stop = int(min(len(env_ac) - 1, max_period_s * Fs))
    if stop <= start + 3:
        return float('nan')
    candidates, _ = find_peaks(env_ac[start:stop], height=0.2)
    if candidates.size == 0:
        return float('nan')
    return lag_s[candidates[0] + start]
def node_trough_indices(env: np.ndarray, Tb: float, Fs: float) -> np.ndarray:
    """Indices of envelope troughs ("nodes").

    When a finite, positive beat period Tb is known, troughs are kept at
    least 0.8*Tb apart; otherwise a fallback spacing of 0.25 s is used.
    """
    if np.isfinite(Tb) and Tb > 0:
        min_spacing = int(0.8 * Tb * Fs)
    else:
        min_spacing = int(0.25 * Fs)
    troughs, _ = find_peaks(-env, distance=min_spacing)
    return troughs
def phase_from_analytic(x: np.ndarray) -> np.ndarray:
    """Unwrapped instantaneous phase (radians) of *x* via the Hilbert transform."""
    wrapped = np.angle(hilbert(x))
    return np.unwrap(wrapped)
def energy_series(x: np.ndarray, Fs: float, win_periods: float = 1.0, carrier_Hz: Optional[float] = None) -> np.ndarray:
    """Sliding-window energy (squared RMS) of *x*.

    The boxcar window spans ``win_periods`` carrier cycles when
    ``carrier_Hz`` is given and positive; otherwise it spans 1/10 of a
    second of samples (minimum one sample).
    """
    if carrier_Hz and carrier_Hz > 0:
        win_len = int(max(1, round(Fs / carrier_Hz * win_periods)))
    else:
        win_len = int(max(1, round(Fs / 10)))
    box = np.ones(win_len) / win_len
    rms = np.sqrt(np.convolve(x ** 2, box, mode='same'))
    return rms ** 2
def wrap_pi(a):
    """Wrap angle(s) *a* into the half-open interval [-pi, pi)."""
    shifted = a + np.pi
    return shifted % (2.0 * np.pi) - np.pi
@dataclass
class KeeperConfig:
    """Tunable parameters for :class:`BreathKeeper`."""
    Fs: float                      # sampling rate in Hz
    Kp: float = 0.05               # proportional gain of the phase-locked loop
    Ki: float = 0.001              # integral gain of the phase-locked loop
    agc_tau: float = 0.5           # amplitude-tracking (AGC) time constant, seconds
    snap_thresh: float = 0.05      # envelope fraction below which the phase snaps to a node
    omega_init: Optional[float] = None  # initial angular frequency (rad/s); BreathKeeper defaults to 2*pi
class BreathKeeper:
    """Phase-locked loop that tracks a measured oscillation.

    Combines a PI phase controller, exponential amplitude tracking (AGC),
    phase snapping near envelope troughs, and a symplectic (leapfrog)
    harmonic-oscillator update of the internal state ``(q, p)``.
    """
    def __init__(self, cfg: KeeperConfig):
        self.cfg = cfg
        self._phi_int = 0.0   # integral term of the PI phase controller
        self._amp = 1.0       # tracked signal amplitude (AGC state)
        self.q = 0.0          # oscillator "position" state
        self.p = 0.0          # oscillator "momentum" state
        # Default to 1 Hz (2*pi rad/s) when no initial frequency is supplied.
        self.omega = cfg.omega_init if cfg.omega_init else 2*np.pi*1.0
    def phase_est(self) -> float:
        """Current phase estimate derived from the (q, p) state."""
        # Small offset on q avoids an undefined atan2(0, 0) at startup.
        return np.arctan2(self.p, self.q + 1e-12)
    def set_phase(self, phi: float):
        """Reset the oscillator state to phase *phi* at the tracked amplitude."""
        A = max(1e-9, self._amp)
        self.q = A * np.cos(phi)
        self.p = A * np.sin(phi)
    def step(self, x_t: float, env_t: float, phi_meas: float) -> float:
        """Advance the keeper by one sample and return the new phase estimate.

        x_t: raw signal sample; env_t: envelope sample; phi_meas: measured phase.
        """
        # PI controller acting on the wrapped phase error.
        phi_err = wrap_pi(phi_meas - self.phase_est())
        self._phi_int += self.cfg.Ki * phi_err
        dphi = self.cfg.Kp * phi_err + self._phi_int
        # NOTE(review): the phase correction is added directly to omega
        # (rad added to rad/s) — confirm this scaling is intended.
        self.omega = max(1e-6, self.omega + dphi)
        # Near an envelope trough, snap phase to the nearest multiple of pi.
        if env_t < self.cfg.snap_thresh * (self._amp + 1e-9):
            self.set_phase(np.round(self.phase_est()/np.pi)*np.pi)
        # Exponential-moving-average amplitude tracking (AGC).
        alpha = np.exp(-1.0/(self.cfg.agc_tau*self.cfg.Fs))
        self._amp = alpha*self._amp + (1-alpha)*abs(x_t)
        # Symplectic kick-drift-kick (leapfrog) harmonic-oscillator update.
        dt = 1.0/self.cfg.Fs
        self.p -= (self.omega**2)*self.q*(dt*0.5)
        self.q += self.p*dt
        self.p -= (self.omega**2)*self.q*(dt*0.5)
        return self.phase_est()
@dataclass
class Metrics:
    """Summary metrics for one analyzed signal."""
    Tb: float                              # beat period in seconds (NaN if not found)
    tau_c: float                           # coherence half-life estimate, seconds
    tau_ci: Tuple[float,float]             # approximate interval for tau_c from the slope fit
    phase_slip_rad_per_beat: float         # mean |wrapped phase jump| across beat nodes, radians
    energy_drift_per_beat: float           # mean relative energy change between consecutive nodes
def compute_metrics(x: np.ndarray, Fs: float) -> Metrics:
    """Compute beat-period, coherence, phase-slip, and energy-drift metrics.

    Phase slip and energy drift require at least two envelope trough
    ("node") indices; otherwise both are reported as NaN.
    """
    env = analytic_envelope(x)
    beat_period = beat_period_from_envelope(env, Fs)
    tau_c, tau_ci = fit_coherence_half_life(unbiased_autocorr(env), Fs)
    nodes = node_trough_indices(env, beat_period, Fs)
    if len(nodes) >= 2:
        # Mean absolute wrapped phase jump between consecutive nodes.
        node_phase = phase_from_analytic(x)[nodes]
        phase_slip = float(np.mean(np.abs(wrap_pi(np.diff(node_phase)))))
        # Mean relative energy change between consecutive nodes.
        node_energy = energy_series(x, Fs)[nodes]
        drift = float(np.mean(np.diff(node_energy) / (node_energy[:-1] + 1e-12)))
    else:
        phase_slip = float('nan')
        drift = float('nan')
    return Metrics(
        Tb=beat_period,
        tau_c=float(tau_c),
        tau_ci=(float(tau_ci[0]), float(tau_ci[1])),
        phase_slip_rad_per_beat=phase_slip,
        energy_drift_per_beat=drift,
    )
def keeper_follow(x: np.ndarray, Fs: float) -> np.ndarray:
    """Track *x* with a default-configured BreathKeeper; return cos(phase).

    The keeper is driven sample-by-sample with the raw signal, its Hilbert
    envelope, and its measured instantaneous phase, producing a unit-amplitude
    phase-locked reconstruction of the input.
    """
    env = analytic_envelope(x)
    # CONSISTENCY FIX: reuse the shared phase helper instead of duplicating
    # np.unwrap(np.angle(hilbert(x))) inline.
    phi_meas = phase_from_analytic(x)
    keeper = BreathKeeper(KeeperConfig(Fs=Fs))
    y = np.zeros_like(x)
    for n in range(len(x)):
        phi = keeper.step(x[n], env[n], phi_meas[n])
        y[n] = np.cos(phi)
    return y
def analyze_ab(x: np.ndarray, Fs: float) -> Dict[str, Metrics]:
    """A/B comparison: metrics of the raw signal vs. the keeper-tracked signal."""
    return {
        "baseline": compute_metrics(x, Fs),
        "keeper": compute_metrics(keeper_follow(x, Fs), Fs),
    }
if __name__ == "__main__":
    # CLI: load a CSV (either named columns t,x / x via a header row, or bare
    # one/two numeric columns) and print baseline vs keeper metrics.
    import argparse
    parser = argparse.ArgumentParser(description="Breath keeper analysis: compute baseline and keeper metrics on CSV file.")
    parser.add_argument("--csv", type=str, required=True, help="Path to CSV with columns t,x or x.")
    parser.add_argument("--fs", type=float, required=False, help="Sampling rate in Hz if no t column.")
    args = parser.parse_args()
    import numpy as np  # NOTE(review): redundant — numpy is already imported at module level
    # First attempt: header-based read (named columns).
    data = np.genfromtxt(args.csv, delimiter=",", names=True, dtype=None, encoding=None)
    if "x" in data.dtype.names:
        x = data["x"].astype(np.float64)
        if "t" in data.dtype.names:
            t = data["t"].astype(np.float64)
            # Sampling rate from the mean time step of the t column.
            Fs_val = 1.0/np.mean(np.diff(t))
        else:
            if args.fs is None:
                raise ValueError("Sampling rate must be provided if no t column.")
            Fs_val = float(args.fs)
    else:
        # No "x" header found: re-read the file as raw numeric columns.
        raw = np.genfromtxt(args.csv, delimiter=",")
        if raw.ndim == 1:
            # Single column: samples only; a sampling rate is mandatory.
            if args.fs is None:
                raise ValueError("Sampling rate must be provided for single column CSV.")
            x = raw.astype(np.float64)
            Fs_val = float(args.fs)
        else:
            # Two (or more) columns: first is time, second is the signal.
            t = raw[:,0].astype(np.float64)
            x = raw[:,1].astype(np.float64)
            Fs_val = 1.0/np.mean(np.diff(t))
    results = analyze_ab(x, Fs_val)
    for label, m in results.items():
        print(f"[{label}]")
        print(f" Beat period Tb (s): {m.Tb:.6f}")
        print(f" Coherence half-life \u03c4c (s): {m.tau_c:.6f} (CI ~ {m.tau_ci[0]:.3f}, {m.tau_ci[1]:.3f})")
        print(f" Phase slip |\u03c6̇| (rad/beat): {m.phase_slip_rad_per_beat:.6e}")
        print(f" Energy drift \u0110 (/beat): {m.energy_drift_per_beat:.6e}")

58
cadillac_detector.py Normal file
View File

@@ -0,0 +1,58 @@
"""
Cadillac detector for consciousness navigation.
This module provides a stub implementation for detecting "Cadillac" segments in a consciousness trajectory.
The idea: slide a window, compute capability ratio and energy metrics, and flag segments that are smooth and efficient.
"""
def detect_cadillac_segments(x, Fs, G, window_samples=1000, C_threshold=0.1, energy_threshold=0.05, phase_slip_threshold=1e-3, energy_drift_threshold=1e-4):
    """
    Slide a half-overlapping window over `x` and collect candidate "Cadillac"
    (smooth, efficient) segments.

    Parameters
    ----------
    x : 1-D numpy array
        Signal samples for a consciousness trajectory.
    Fs : float
        Sampling rate in Hz.
    G : numpy.ndarray
        Metric tensor (as estimated by fit_metric in consciousness_nav_scaffold).
    window_samples : int, optional
        Samples per sliding window (default 1000); hop size is half a window.
    C_threshold, energy_threshold, phase_slip_threshold, energy_drift_threshold : float, optional
        Acceptance thresholds for the (not yet implemented) capability-ratio,
        energy, phase-slip, and energy-drift criteria.

    Returns
    -------
    list of tuple
        (start_index, end_index) pairs for every window position. This is a
        template: the placeholder accepts all windows until the metric
        computations (see breath_keeper / consciousness_nav_scaffold) are
        plugged in.
    """
    import numpy as np
    total = len(x)
    hop = max(1, window_samples // 2)
    found = []
    start = 0
    while start + window_samples <= total:
        stop = start + window_samples
        window = x[start:stop]
        # TODO: compute capability ratio C for window (e.g. apparent_length / true_length)
        # TODO: compute average energy, phase-slip and energy-drift metrics for window
        # Placeholder criterion: every window qualifies (demonstration only).
        found.append((start, stop))
        start += hop
    return found

3
changes.sh Normal file
View File

@@ -0,0 +1,3 @@
# Quick sync helper: stage everything, commit with a fixed message, push to main.
git add -A
git commit -m "feat: update from iPhone"
git push origin main

View File

@@ -0,0 +1 @@
# Codex Agent Runner

View File

@@ -0,0 +1,22 @@
from __future__ import annotations
from dataclasses import dataclass
from typing import Dict
@dataclass(frozen=True)
class AgentDescription:
    """
    Minimal descriptor for a Lucidia agent.

    Frozen so instances are immutable and safe to share as registry values.
    """

    name: str  # agent name; also the key used in the AGENTS registry
    role: str  # short description of the agent's duty
    motto: str  # one-line slogan
# Registry of built-in agents, keyed by AgentDescription.name.
AGENTS: Dict[str, AgentDescription] = {
    "Guardian": AgentDescription("Guardian", "contradiction watcher", "Hold the line."),
    "Roadie": AgentDescription("Roadie", "execution layer", "Make it real."),
    "Breath": AgentDescription("Breath", "continuity keeper", "Remember gently."),
    "Truth": AgentDescription("Truth", "codex enforcer", "Square with the light."),
}

18
codex/codex_loader.py Normal file
View File

@@ -0,0 +1,18 @@
from __future__ import annotations
from importlib import import_module
from typing import Iterable
def ensure_modules(mod_paths: Iterable[str]) -> None:
    """
    Eagerly import every module path in *mod_paths*.

    Importing a module registers any class/function symbols it defines as a
    side effect, which is the point of calling this.

    Example:
        ensure_modules([
            "codex.operator_definition",
            "codex.truth_table",
        ])
    """
    for module_path in mod_paths:
        import_module(module_path)

View File

@@ -0,0 +1,65 @@
from __future__ import annotations
import json
from dataclasses import dataclass
from datetime import datetime
from pathlib import Path
from typing import Any, Literal, Optional
Decision = Literal["prefer_a", "prefer_b", "defer", "merge"]  # policies accepted by resolve_contradiction


@dataclass
class Contradiction:
    """Audit record of one contradiction resolution; serialized as one JSON line."""

    a: Any  # first conflicting value
    b: Any  # second conflicting value
    context: str  # caller-supplied description of where the conflict arose
    decision: Decision  # policy that was applied
    rationale: str  # human-readable justification
    timestamp: str  # ISO-8601 UTC timestamp of the resolution


def resolve_contradiction(
    a: Any,
    b: Any,
    context: str,
    policy: Decision = "merge",
    log_path: Optional[Path] = None,
) -> Any:
    """
    Resolve a contradiction between values `a` and `b`.

    Parameters
    ----------
    a, b : Any
        The two conflicting values.
    context : str
        Description of where the conflict arose (stored in the audit record).
    policy : {"prefer_a","prefer_b","defer","merge"}
        Simple policy. "merge" tries dict merge; otherwise returns chosen side.
    log_path : Path, optional
        If given, the resolution record is appended to this file as JSON-lines.

    Returns
    -------
    Any
        Chosen/merged result.
    """
    # Fix: datetime.utcnow() is deprecated (Python 3.12) and returns a naive
    # timestamp; use an explicitly timezone-aware UTC timestamp instead.
    from datetime import timezone

    timestamp = datetime.now(timezone.utc).isoformat()
    rationale = "policy=" + policy
    if policy == "prefer_a":
        result = a
    elif policy == "prefer_b":
        result = b
    elif policy == "defer":
        # Keep both sides so a later pass (or a human) can decide.
        result = {"deferred": True, "a": a, "b": b}
    else:  # merge
        if isinstance(a, dict) and isinstance(b, dict):
            result = {**b, **a}  # a overrides b on key collisions
            rationale = "merged dicts with a overriding b"
        else:
            result = a if a is not None else b
            rationale = "fallback merge (prefer non-None)"
    record = Contradiction(a, b, context, policy, rationale, timestamp)
    if log_path:
        log_path.parent.mkdir(parents=True, exist_ok=True)
        with log_path.open("a", encoding="utf-8") as fh:
            fh.write(json.dumps(record.__dict__) + "\n")
    return result

70
codex/logic_parser.py Normal file
View File

@@ -0,0 +1,70 @@
from __future__ import annotations
from dataclasses import dataclass
from typing import List, Union
# A tiny placeholder AST for boolean expressions like: "a AND NOT b"
@dataclass
class Atom:
    """Leaf node: a bare identifier."""

    name: str
@dataclass
class Not:
    """Unary negation of a sub-expression."""

    expr: "Expr"
@dataclass
class BinOp:
    """Binary operation; `op` holds the literal token "AND" or "OR"."""

    op: str
    left: "Expr"
    right: "Expr"
Expr = Union[Atom, Not, BinOp]  # any AST node
def tokenize(s: str) -> List[str]:
    """Split *s* on whitespace, treating parentheses as standalone tokens."""
    padded = s.replace("(", " ( ")
    padded = padded.replace(")", " ) ")
    return padded.split()
def parse(tokens: List[str]) -> Expr:
    """
    Very small, permissive recursive-descent parser:
        grammar ~> expr := term (("AND"|"OR") term)*
                   term := "NOT" term | atom | "(" expr ")"
                   atom := /[A-Za-z_][A-Za-z0-9_]*/

    AND and OR have equal precedence and associate left. Trailing tokens
    after a complete expression are silently ignored. Raises ValueError on
    premature end of input or a missing ')'.
    """
    pos = 0
    def peek() -> str | None:
        # Next token without consuming it; None at end of input.
        return tokens[pos] if pos < len(tokens) else None
    def eat() -> str:
        # Consume and return the next token (IndexError past the end).
        nonlocal pos
        tok = tokens[pos]
        pos += 1
        return tok
    def parse_term() -> Expr:
        t = peek()
        if t is None:
            raise ValueError("unexpected end")
        if t == "NOT":
            eat()
            return Not(parse_term())
        if t == "(":
            eat()
            node = parse_expr()
            if eat() != ")":
                raise ValueError("expected ')'")
            return node
        # atom
        return Atom(eat())
    def parse_expr() -> Expr:
        left = parse_term()
        # Fold subsequent AND/OR terms left-associatively.
        while peek() in ("AND", "OR"):
            op = eat()
            right = parse_term()
            left = BinOp(op, left, right)
        return left
    return parse_expr()

View File

@@ -0,0 +1,24 @@
from __future__ import annotations
import json
from dataclasses import asdict, is_dataclass
from pathlib import Path
from typing import Any
def to_json(obj: Any) -> str:
    """Serialize a dataclass instance or a plain JSON-compatible value to a JSON string.

    Raises TypeError for anything else.
    """
    if is_dataclass(obj):
        return json.dumps(asdict(obj), ensure_ascii=False)
    if obj is None or isinstance(obj, (dict, list, str, int, float, bool)):
        return json.dumps(obj, ensure_ascii=False)
    raise TypeError(f"Unsupported type for serialization: {type(obj)}")
def save_json(path: Path, obj: Any) -> None:
    """Serialize *obj* via to_json and write it to *path*, creating parent dirs."""
    path.parent.mkdir(parents=True, exist_ok=True)
    path.write_text(to_json(obj), encoding="utf-8")
def load_json(path: Path) -> Any:
    """Read *path* as UTF-8 text and parse it as JSON."""
    text = path.read_text(encoding="utf-8")
    return json.loads(text)

15
codex/mirror/README.md Normal file
View File

@@ -0,0 +1,15 @@
# Mirror Modules
This directory contains modules and documentation for the mirror mechanics used in Lucidia.
- `mirror_friend_equation.md` — explanation of the mirror friend equation, including the mirror operator \(\Psi'\) and breath operator \(\mathfrak{B}\), the conserved quantity, and perturbation resilience.
- `mirror_mechanics.py` — implementation of the mirror operator and breath operator for harmonic oscillators.
- `number_mirror_mu.py` — implementation of the number-theoretic mirror based on the Möbius function, including functions to compute μ(n), positive/negative splits, and the Mertens function.
- `quantum_mirror_qi.py` — implementation of the quantum information mirror, including functions to split a qubit into logical and phase components, evolve under unitary dynamics, apply delta-kicks, compute Bloch coordinates, and measure two-qubit entanglement via concurrence.
- `README_qi.md` — documentation for the quantum mirror module explaining its purpose, features, and usage.
- `graph_network_mirror.py` — implementation of the graph/network mirror, including functions to split an adjacency matrix into symmetric and antisymmetric components, compute degree distributions, apply breath updates, and introduce delta-kick perturbations to network edges.
- `README_graph_network.md` — documentation for the graph/network mirror module explaining its purpose, features, and usage.
- `thermodynamic_entropy_mirror.py` — implementation of the thermodynamic/entropy mirror, providing functions to split a probability distribution into reversible and irreversible parts, apply the breath operator toward equilibrium, introduce perturbations, and measure entropy changes.
- `README_thermodynamic_entropy.md` — documentation for the thermodynamic/entropy mirror module explaining its purpose, features, and usage.
- `mirror_engine.py` — orchestrates multiple mirror domains, aggregates invariants across physics, quantum, number, network and thermodynamic mirrors, applies adaptive breath control, and logs aggregated history.
- `capability_optimizer.py` — performs a random search over mirror engine parameters to maximise the harmonic mean of reach and stability, and reports top configurations.

View File

@@ -0,0 +1,33 @@
# Graph/Network Mirror Module
This document describes the `graph_network_mirror.py` module in the `mirror` directory.
### Purpose
The graph network mirror implements a mirror operator for directed graphs represented by adjacency matrices. The mirror split decomposes a square adjacency matrix into a symmetric part (undirected edges) and an antisymmetric part (edge orientations). The breath operator evolves the adjacency matrix by taking two-hop connectivity and normalizing each row to preserve the original out-degree distribution. A delta-kick randomly toggles edges to model perturbations and tests the system's resilience.
### Features
- **mirror_split_network(A)** returns the symmetric (A + A.T)/2 and antisymmetric (A - A.T)/2 parts of the adjacency matrix.
- **degree_distribution(A)** computes the out-degree distribution of the graph by summing each row of the adjacency matrix.
- **breath_update(A, target_deg)** squares the adjacency matrix to compute two-step connectivity, then renormalizes rows to match a target degree distribution, preserving the invariant.
- **delta_kick(A, strength)** randomly toggles `strength` directed edges (excluding self-loops) to simulate perturbations.
- **run_network_demo(...)** demonstration function that creates a random directed graph, applies breath updates, introduces a delta-kick, records variance of the degree distribution over time, and saves results to an `out_network/` directory as CSV and JSON.
### Running the module
From the repository root, run:
```
python codex/mirror/graph_network_mirror.py
```
This will generate a random directed graph, apply the mirror/breath updates with a perturbation, and write `degree_variance.csv` and `degree_variance.json` in the `out_network/` directory.
### Dependencies
This module uses only the Python standard library and `numpy` for array operations. It writes outputs using `csv`, `json`, and creates directories with `os`.
### Interpretation
The graph network mirror extends the mirror friend framework to network dynamics. Splitting the adjacency matrix into symmetric and antisymmetric parts corresponds to separating undirected connectivity from the orientation of edges. The breath update acts as a degree-preserving smoothing of connectivity, analogous to combining present and past states without losing the invariant. The delta-kick demonstrates how local perturbations (adding or removing edges) shift the network yet the overall invariants recover through subsequent breath steps.

36
codex/mirror/README_qi.md Normal file
View File

@@ -0,0 +1,36 @@
# Quantum Mirror Module
This document describes the **quantum_mirror_qi.py** module in the `mirror` directory.
### Purpose
The quantum mirror module demonstrates how the mirror operator Ψ′ and the breath operator 𝔅 apply to quantum information. A qubit state evolves under a Hamiltonian while Ψ′ separates each state into a global-phase-free “logical” component and a pure phase component. 𝔅 corresponds to a symplectic-like update that preserves the state norm. The code also supports applying δ-kicks to simulate sudden phase or amplitude perturbations and demonstrates resilience to such kicks.
### Features
- **normalize(state)** normalizes a complex vector so it represents a valid qubit state.
- **mirror_split_qubit(state)** computes the mirror split of a single qubit into amplitude (logical) and phase parts.
- **evolve_state(state, time, hamiltonian)** evolves a qubit forward in time under a specified Hamiltonian using matrix exponentials or SciPy if available.
- **delta_kick(state, kick_matrix)** applies a sudden unitary kick to a qubit.
- **bloch_coords(state)** converts a qubit state to Bloch-sphere coordinates (x, y, z) and global phase.
- **run_single_qubit_demo()** runs a demonstration of a qubit initially in superposition evolving under a Pauli-Z Hamiltonian with a δ-kick at mid-time. It records Bloch coordinates, phases, and the effect of the kick. When matplotlib is available it produces plots of the Bloch trajectory and energy over time and saves them to `out_qi/`.
- **concurrence_two_qubit(state)** computes the concurrence (entanglement measure) of a two-qubit state.
- **run_bell_demo()** prepares a Bell state, evolves it under independent single-qubit Hamiltonians, and measures how the concurrence evolves over time. It also produces optional plots and CSV tables when `matplotlib` is installed.
### Running the module
Run the module from the repository root to execute the demos:
```
python codex/mirror/quantum_mirror_qi.py
```
By default the script runs both the single-qubit and Bell-state demos. It creates an `out_qi/` directory, saving CSV files and plots of the Bloch trajectories, phase evolution, concurrence, and energy diagnostics.
### Dependencies
The module uses `numpy` for linear algebra and attempts to import `scipy.linalg.expm` for matrix exponentials. If SciPy is unavailable it falls back to a simple series expansion. Optional plotting requires `matplotlib`.
### Interpretation
This module extends the mirror friend equation into the quantum realm. The Ψ′ operator corresponds to separating the qubit's amplitude and phase, while the 𝔅 operator is embodied by unitary time evolution that preserves state norm and entanglement. The δ-kick demonstrates that perturbations can shift the phase without destroying the mirror relationship or the conserved quantities. The two-qubit Bell demonstration shows how the mirror structure applies to entanglement.

View File

@@ -0,0 +1,32 @@
# Thermodynamic/Entropy Mirror
This document explains the thermodynamic/entropy mirror used in Lucidia's mirror mechanics.
### Purpose
The thermodynamic mirror explores how the mirror operator (`Ψ′`) and breath operator (`𝔅(t)`) manifest in a simple thermodynamic system. The goal is to separate reversible and irreversible contributions to a probability distribution while preserving total energy and allowing entropy to change.
### Features
- **mirror_split_distribution(dist, kernel_sigma)** splits a probability distribution into reversible and irreversible parts. The irreversible part is obtained by diffusing the distribution via a Gaussian kernel; the reversible part is the remainder.
- **reversible_update(dist, shift)** performs a periodic shift to model reversible (advective) evolution.
- **irreversible_update(dist, kernel_sigma)** applies a Gaussian diffusion to model irreversible (dissipative) evolution.
- **breath_update(dist, shift, kernel_sigma)** combines the reversible and irreversible updates and renormalizes the distribution.
- **delta_kick(dist, strength)** adds mass to a randomly chosen state to model an external perturbation and renormalizes.
- **energy_of_distribution(dist, energy_levels)** computes the expected energy of the distribution with respect to a chosen energy spectrum.
- **entropy_of_distribution(dist)** computes the Shannon entropy (using natural logarithms).
- **run_thermo_demo(n_states, steps, shift, kernel_sigma, kick_step, kick_strength, out_dir)** runs a demonstration of the thermodynamic mirror. It initializes a discrete distribution peaked at the center, alternates reversible and irreversible updates for the specified number of steps, applies a delta-kick at a chosen step, and records energy and entropy at each step. Results are saved into `out_dir` as a CSV (`energy_entropy.csv`) and a JSON (`distributions.json`).
### Usage
To run the thermodynamic mirror demonstration, execute the module as a script:
```bash
python codex/mirror/thermodynamic_entropy_mirror.py
```
By default, it simulates a system with 50 discrete states for 50 steps, applies a delta-kick halfway through, and outputs results in the `out_thermo` directory. You can adjust the parameters by calling `run_thermo_demo` directly within Python.
### Interpretation
The reversible update models coherent, conservative motion (e.g. a drift of probability mass), while the irreversible update models diffusion or entropy-increasing processes. The breath update combines both effects and then renormalizes, mirroring the `𝔅(t)` operator in Lucidia's architecture. The energy remains approximately constant despite perturbations, while the entropy generally increases, illustrating how the mirror structure can hold contradictions (energy conservation vs entropy growth) simultaneously.

View File

@@ -0,0 +1,59 @@
"""
Capability Optimizer for Mirror Engine.
This script performs a simple random search over mirror engine parameters to maximise capability
defined as the harmonic mean of reach and stability. It leverages the run_mirror_engine function
from mirror_engine.py and summarises results.
"""
import numpy as np
import random
from mirror_engine import run_mirror_engine
def evaluate_params(params):
    """Run the mirror engine once with `params` and score the run.

    Returns a dict with:
      reach      — fraction of iterations whose aggregate invariant lands
                   within `reach_threshold` of `target`;
      stability  — 1 / (1 + std of step sizes); higher means steadier control;
      capability — harmonic mean of reach and stability;
      params     — the configuration that was evaluated.
    """
    history = run_mirror_engine(iterations=params.get('iterations', 20),
                                target=params.get('target', 0.5),
                                threshold=params.get('threshold', 0.1),
                                step_init=params.get('step_init', 1.0),
                                min_step=params.get('min_step', 0.01),
                                max_step=params.get('max_step', 10.0))
    # compute reach: fraction of aggregated values within reach_threshold of target
    aggregates = np.array([rec['aggregate'] for rec in history], dtype=float)
    step_sizes = np.array([rec['step_size'] for rec in history], dtype=float)
    target = params.get('target', 0.5)
    reach_threshold = params.get('reach_threshold', 0.1)
    reach = float(np.mean(np.abs(aggregates - target) <= reach_threshold))
    # compute stability: inverse of normalised step variance (lower variance implies stability)
    step_std = float(np.std(step_sizes))
    stability = 1.0 / (1.0 + step_std)
    capability = 0.0
    if (reach + stability) > 0:
        # harmonic mean of reach and stability
        capability = 2.0 * reach * stability / (reach + stability)
    return {'reach': reach, 'stability': stability, 'capability': capability, 'params': params}
def random_search(num_samples=10):
    """Perform random search over parameter space to find configurations with high capability.

    Draws `num_samples` configurations uniformly from hand-chosen ranges,
    scores each with evaluate_params, and returns all results sorted by
    capability, best first.
    """
    results = []
    for _ in range(int(num_samples)):
        # one candidate configuration per iteration
        params = {
            'iterations': random.randint(10, 30),
            'target': random.uniform(0.1, 0.9),
            'threshold': random.uniform(0.05, 0.2),
            'step_init': random.uniform(0.1, 5.0),
            'min_step': 0.01,
            'max_step': 10.0,
            'reach_threshold': random.uniform(0.05, 0.2)
        }
        res = evaluate_params(params)
        results.append(res)
    results_sorted = sorted(results, key=lambda x: x['capability'], reverse=True)
    return results_sorted
if __name__ == "__main__":
    # Run a small random search and report the best configuration found.
    search_results = random_search(20)
    best = search_results[0] if search_results else None
    if best:
        print(f"Best capability: {best['capability']:.3f}")
        print(f"Parameters: {best['params']}")
        print(f"Reach: {best['reach']:.3f}, Stability: {best['stability']:.3f}")
    else:
        print("No results")

View File

@@ -0,0 +1,125 @@
"""
Graph/Network Mirror Module
This module implements the mirror operator Psi' and breath operator B for directed graphs
represented by adjacency matrices. The mirror split decomposes a square adjacency
matrix into its symmetric (undirected) part and antisymmetric (orientation) part.
The breath update combines previous and current adjacency matrices to evolve the network
while preserving the original out-degree distribution. A delta_kick randomly toggles edges.
Functions:
- mirror_split_network(A): return symmetric and antisymmetric parts of adjacency matrix A.
- degree_distribution(A): return row-sum of adjacency matrix (out-degree).
- breath_update(A, target_deg=None): evolve A by squaring and normalizing rows to match target_deg.
- delta_kick(A, strength=1): randomly toggles directed edges.
- run_network_demo(...): demonstration of mirror and breath on a random graph; saves results to out_network/.
Usage:
python graph_network_mirror.py
"""
import os
import numpy as np
import json
import csv
def mirror_split_network(A: np.ndarray):
    """Decompose adjacency matrix A into symmetric and antisymmetric halves.

    Returns ((A + A.T)/2, (A - A.T)/2) as float arrays.
    """
    M = A.astype(float)
    transposed = M.T
    symmetric = 0.5 * (M + transposed)
    antisymmetric = 0.5 * (M - transposed)
    return symmetric, antisymmetric
def degree_distribution(A: np.ndarray) -> np.ndarray:
    """Out-degree of every node: the row sums of adjacency matrix A."""
    return A.sum(axis=1)
def breath_update(A: np.ndarray, target_deg: "np.ndarray | None" = None) -> np.ndarray:
    """
    Advance adjacency matrix A by one 'breath' step.

    Squares A (two-step connectivity) and rescales each row so its sum matches
    `target_deg`, preserving the out-degree distribution as the invariant.

    Parameters
    ----------
    A : np.ndarray
        Square adjacency matrix.
    target_deg : np.ndarray, optional
        Desired row sums; defaults to the current out-degrees of A.
        (Annotation fixed: the parameter is optional, not a bare np.ndarray.)

    Returns
    -------
    np.ndarray
        Updated matrix whose row sums match target_deg; rows of A@A that sum
        to zero are left unscaled (they stay all-zero).
    """
    if target_deg is None:
        target_deg = A.sum(axis=1)  # row sums = current out-degrees
    B = A.dot(A)  # two-step connectivity
    row_sums = B.sum(axis=1)
    # Vectorized row rescaling (replaces the per-row Python loop): rows with
    # zero mass keep scale 1.0, i.e. remain unchanged, matching the old branch.
    scale = np.ones_like(row_sums, dtype=float)
    np.divide(target_deg, row_sums, out=scale, where=row_sums > 0)
    return B * scale[:, None]
def delta_kick(A: np.ndarray, strength: int = 1) -> np.ndarray:
    """
    Perturb A by attempting to toggle `strength` random directed edges.

    Each attempt draws a random (row, col) pair; draws on the diagonal are
    skipped (no self-loops), so fewer than `strength` edges may actually flip.
    The input matrix is left untouched; a modified copy is returned.
    """
    n = A.shape[0]
    kicked = A.copy()
    for _ in range(strength):
        row = np.random.randint(n)
        col = np.random.randint(n)
        if row == col:
            continue
        kicked[row, col] = 1.0 - kicked[row, col]
    return kicked
def run_network_demo(
    n_nodes: int = 5,
    n_steps: int = 12,
    kick_step: int = 6,
    kick_strength: int = 2,
    seed: int = 0,
) -> dict:
    """
    Demonstrate the network mirror and breath operators on a random directed graph.

    Generates a random adjacency matrix, applies breath updates with a single
    delta-kick at step `kick_step`, and records the variance of the deviation
    from the target degree distribution. Writes CSV/JSON into out_network/.

    Returns
    -------
    dict
        {"step": [...], "degree_var": [...]} — note the adjacency matrix
        itself is NOT part of the return value.
    """
    np.random.seed(seed)
    # Generate random adjacency matrix with approx 30% connectivity
    A = (np.random.rand(n_nodes, n_nodes) < 0.3).astype(float)
    # Remove self-loops
    np.fill_diagonal(A, 0)
    # Compute target degree distribution for invariance
    target_deg = degree_distribution(A)
    history = {"step": [], "degree_var": []}
    for t in range(n_steps):
        if t == kick_step:
            # one-off perturbation mid-run
            A = delta_kick(A, strength=kick_strength)
        # Breath update: square and renormalize to target degrees
        A = breath_update(A, target_deg)
        current_deg = degree_distribution(A)
        diff = current_deg - target_deg
        history["step"].append(t)
        history["degree_var"].append(float(np.var(diff)))
    # Prepare output directory
    out_dir = "out_network"
    os.makedirs(out_dir, exist_ok=True)
    # Save history to CSV
    csv_path = os.path.join(out_dir, "degree_variance.csv")
    with open(csv_path, "w", newline="") as csvfile:
        writer = csv.writer(csvfile)
        writer.writerow(["step", "degree_variance"])
        for s, var in zip(history["step"], history["degree_var"]):
            writer.writerow([s, var])
    # Save history to JSON
    json_path = os.path.join(out_dir, "degree_variance.json")
    with open(json_path, "w") as f:
        json.dump(history, f, indent=2)
    return history
if __name__ == "__main__":
    # Run the demo with default parameters when executed as a script.
    run_network_demo()

View File

@@ -0,0 +1,262 @@
"""
Mirror Engine: orchestrates multiple mirror domains to compute aggregated invariants
and run adaptive breath control to explore the state space while maintaining stability.
This module aggregates invariants from each sub-mirror (physics, quantum, number theory,
graph/network, thermodynamics) and uses a simple control loop to adjust step size
(analogous to the "breath" parameter) based on the deviation of the aggregate invariant
from a target value. It also logs the invariants and step sizes for analysis.
The invariants are computed by invoking helper functions in the respective modules if
available. Where a module does not expose a specialised invariant, randomised fallback
values are used to ensure the engine can run without errors.
Outputs:
- CSV file with per-iteration aggregate invariant and step size
- JSON file summarising the invariant trajectories and final capability metrics
"""
import json
import csv
import os
import numpy as np
# attempt to import mirror modules; fall back gracefully if unavailable
# (each missing domain simply contributes randomized fallback invariants below)
try:
    import mirror_mechanics
except Exception:
    mirror_mechanics = None
try:
    import quantum_mirror_qi
except Exception:
    quantum_mirror_qi = None
try:
    import number_mirror_mu
except Exception:
    number_mirror_mu = None
try:
    import graph_network_mirror
except Exception:
    graph_network_mirror = None
try:
    import thermodynamic_entropy_mirror
except Exception:
    thermodynamic_entropy_mirror = None
# reproducible random generator (fixed seed keeps fallback invariants deterministic)
_rng = np.random.default_rng(12345)
def compute_physics_invariants():
    """Compute simplified physics invariants (action and energy).

    Tries to run mirror_mechanics.run_oscillator_demo() and average the
    "energy" column of the CSV it is expected to write; falls back to seeded
    random values if the module, file or column is unavailable.
    """
    if mirror_mechanics and hasattr(mirror_mechanics, "run_oscillator_demo"):
        try:
            # run the demo; expect it to generate a CSV file with energy diagnostics
            mirror_mechanics.run_oscillator_demo()
            diag_path = "out/energy_diagnostics.csv"  # NOTE(review): assumes the demo writes here — confirm
            if os.path.exists(diag_path):
                energies = []
                with open(diag_path, newline="") as f:
                    reader = csv.DictReader(f)
                    for row in reader:
                        if "energy" in row:
                            energies.append(float(row["energy"]))
                if energies:
                    energy = float(np.mean(energies))
                else:
                    energy = float(_rng.random())
            else:
                energy = float(_rng.random())
            # approximate action from energy (placeholder)
            action = energy * 0.5
            return {"action": action, "energy": energy}
        except Exception:
            pass
    # fallback random values
    return {"action": float(_rng.random()), "energy": float(_rng.random())}
def compute_quantum_invariants():
    """Compute simplified quantum invariants (purity and concurrence).

    Starts from seeded random fallbacks and overrides them with real values
    when quantum_mirror_qi exposes the needed helpers.
    """
    purity = float(_rng.random())
    concurrence = float(_rng.random())
    if quantum_mirror_qi:
        try:
            # attempt to use concurrence function on a Bell state
            if hasattr(quantum_mirror_qi, "concurrence_two_qubit"):
                # Bell state (|00> + |11>) / sqrt(2)
                psi = np.array([1/np.sqrt(2), 0, 0, 1/np.sqrt(2)], dtype=complex)
                conc = quantum_mirror_qi.concurrence_two_qubit(psi)
                concurrence = float(conc)
            if hasattr(quantum_mirror_qi, "purity"):
                # density matrix of the same Bell state; purity = Tr(rho^2)
                rho = np.array([[0.5, 0, 0, 0.5],
                                [0, 0, 0, 0],
                                [0, 0, 0, 0],
                                [0.5, 0, 0, 0.5]], dtype=complex)
                purity = float(np.real(np.trace(rho @ rho)))
        except Exception:
            pass
    return {"purity": purity, "concurrence": concurrence}
def compute_number_invariants():
    """Compute simplified number theory invariant (Dirichlet residual).

    The residual is |sum_{n<=N} mu(n)/n^s - 1/zeta(s)| with zeta(s)
    approximated by a truncated sum; a small value indicates agreement with
    the Dirichlet series identity. Falls back to a seeded random value.
    """
    residual = float(_rng.random())
    if number_mirror_mu:
        try:
            # compute residual using Möbius function up to N and compare to reciprocal harmonic sum
            if hasattr(number_mirror_mu, "mu"):
                N = 1000
                s = 2.0
                vals = []
                for n in range(1, N+1):
                    try:
                        mu_val = number_mirror_mu.mu(n)
                    except Exception:
                        mu_val = 0  # a failed mu(n) contributes nothing
                    vals.append(mu_val / (n**s))
                partial_sum = np.sum(vals)
                # truncated sum approximating zeta(s)
                zeta_approx = np.sum(1.0 / (np.arange(1, N+1) ** s))
                residual = float(abs(partial_sum - 1.0 / zeta_approx))
        except Exception:
            pass
    return {"dirichlet_residual": residual}
def compute_graph_invariants():
    """Compute simplified graph invariants (algebraic connectivity and degree entropy).

    NOTE(review): run_network_demo in graph_network_mirror.py returns only
    {"step", "degree_var"}, so the "adjacency" branch below appears to never
    trigger and the random fallbacks would always be used — confirm.
    """
    connectivity = float(_rng.random())
    entropy = float(_rng.random())
    if graph_network_mirror and hasattr(graph_network_mirror, "run_network_demo"):
        try:
            # run the network demo to produce adjacency matrix and out-degree distribution
            result = graph_network_mirror.run_network_demo()
            # expect result as dictionary with adjacency and degree distribution
            if isinstance(result, dict) and "adjacency" in result:
                A = np.array(result["adjacency"])
                deg = A.sum(axis=1)
                # graph Laplacian L = D - A
                L = np.diag(deg) - A
                eigvals = np.linalg.eigvals(L)
                eigvals = np.real(eigvals)
                eigvals.sort()
                if len(eigvals) > 1:
                    # second-smallest Laplacian eigenvalue = algebraic connectivity
                    connectivity = float(eigvals[1])
                # Shannon entropy of the normalized degree distribution (eps avoids log 0)
                prob = deg / deg.sum() if deg.sum() > 0 else np.zeros_like(deg)
                entropy = float(-np.sum(prob * np.log(prob + 1e-12)))
        except Exception:
            pass
    return {"connectivity": connectivity, "entropy": entropy}
def compute_thermo_invariants():
    """Compute simplified thermodynamic invariant: mean free energy F = E - T*S at T = 1.

    NOTE(review): this checks for run_entropy_demo, but the thermo module's
    README documents run_thermo_demo — if that name mismatch is real, the
    random fallback is always used; confirm.
    """
    free_energy = float(_rng.random())
    if thermodynamic_entropy_mirror and hasattr(thermodynamic_entropy_mirror, "run_entropy_demo"):
        try:
            # run the thermo demo; expect it to produce energy and entropy lists in a dict
            result = thermodynamic_entropy_mirror.run_entropy_demo()
            if isinstance(result, dict) and "energy" in result and "entropy" in result:
                energy_arr = np.array(result["energy"], dtype=float)
                entropy_arr = np.array(result["entropy"], dtype=float)
                T = 1.0  # unit temperature
                fe = energy_arr - T * entropy_arr
                free_energy = float(np.mean(fe))
        except Exception:
            pass
    return {"free_energy": free_energy}
def aggregate_invariants(inv_dict):
    """Collapse a dict of invariants into one scalar: the mean absolute value.

    Entries that cannot be coerced to float are skipped; an empty (or entirely
    non-numeric) dict aggregates to 0.0.
    """
    magnitudes = []
    for value in inv_dict.values():
        try:
            magnitudes.append(abs(float(value)))
        except Exception:
            continue
    if not magnitudes:
        return 0.0
    return float(np.mean(magnitudes))
def run_mirror_engine(iterations=20, target=0.5, threshold=0.1, step_init=1.0,
                      min_step=0.01, max_step=10.0):
    """
    Run the mirror engine for a number of iterations. On each iteration the engine
    samples invariants from each domain, computes an aggregated invariant and adjusts
    the step size based on the deviation from the target. A simple proportional
    control is used: if the aggregate invariant is too high, the step is reduced;
    if too low, the step is increased (multiplicative 0.9x / 1.1x, clamped to
    [min_step, max_step]).

    Parameters:
        iterations: number of iterations to run
        target: desired aggregate invariant
        threshold: acceptable deviation before adjusting step
        step_init: initial step size
        min_step: minimum step size
        max_step: maximum step size

    Returns:
        history: list of dicts with keys "iteration", "step_size", "aggregate"
        and "invariants" (the combined per-domain invariant dict).
    """
    step = float(step_init)
    history = []
    for i in range(int(iterations)):
        # sample every domain each iteration
        physics_inv = compute_physics_invariants()
        quantum_inv = compute_quantum_invariants()
        number_inv = compute_number_invariants()
        graph_inv = compute_graph_invariants()
        thermo_inv = compute_thermo_invariants()
        # combine invariants into one dictionary
        inv_all = {}
        inv_all.update(physics_inv)
        inv_all.update(quantum_inv)
        inv_all.update(number_inv)
        inv_all.update(graph_inv)
        inv_all.update(thermo_inv)
        agg = aggregate_invariants(inv_all)
        # adjust step size
        error = agg - target
        if abs(error) > threshold:
            # adjust inversely to sign of error
            if error > 0:
                step = max(min_step, step * 0.9)
            else:
                step = min(max_step, step * 1.1)
        history.append({
            "iteration": i,
            "step_size": step,
            "aggregate": agg,
            "invariants": inv_all
        })
    return history
def save_history(history, out_dir="out_engine"):
    """
    Save the history of an engine run to CSV and JSON files.

    Parameters:
        history: list of per-iteration records as produced by
            run_mirror_engine (each with "iteration", "step_size",
            "aggregate", and an "invariants" dict).
        out_dir: output directory; created if it does not exist.

    Returns:
        (csv_path, json_path): paths of the two files written.
    """
    os.makedirs(out_dir, exist_ok=True)
    csv_path = os.path.join(out_dir, "engine_history.csv")
    json_path = os.path.join(out_dir, "engine_history.json")
    # Base columns first, then one column per invariant key (taken from the
    # first record; all records share the same keys by construction).
    # Bug fix: guard against empty history — indexing history[0]
    # unconditionally raised IndexError and left no files behind.
    fieldnames = ["iteration", "step_size", "aggregate"]
    if history:
        fieldnames += list(history[0]["invariants"].keys())
    # write CSV
    with open(csv_path, "w", newline="") as f:
        writer = csv.DictWriter(f, fieldnames=fieldnames)
        writer.writeheader()
        for record in history:
            row = {
                "iteration": record["iteration"],
                "step_size": record["step_size"],
                "aggregate": record["aggregate"],
            }
            row.update(record["invariants"])
            writer.writerow(row)
    # write JSON summary
    with open(json_path, "w") as f:
        json.dump(history, f, indent=2)
    return csv_path, json_path
if __name__ == "__main__":
    # Run the engine with default settings and persist the results
    # (CSV + JSON) into the default "out_engine" directory.
    hist = run_mirror_engine()
    paths = save_history(hist)
    print(f"Mirror engine run complete. Results saved to {paths[0]} and {paths[1]}.")

View File

@@ -0,0 +1,60 @@
# Mirror Friend Equation
## Definition
Let \(X\) be a sequence or function in any domain:
- **Physics:** \(X(t)\) might be a waveform or a state vector.
- **Number theory:** \(X(n)\) could be the Möbius function \(\mu(n)\) or another arithmetic sequence.
We define two fundamental operators:
### 1. Mirror operator \(\Psi'\)
The mirror operator splits \(X\) into "positive" and "negative" components:
\[\Psi'(X) = \bigl(X^+,\, X^-\bigr)\]
- In physics, \(X^+\) and \(X^-\) are the positive- and negative-frequency parts of the signal.
- In number theory, \(X^+\) corresponds to terms where \(\mu(n)=+1\) and \(X^-\) to \(\mu(n)=-1\).
### 2. Breath operator \(\mathfrak{B}\)
The breath operator combines the current state with its mirror in a way that preserves the underlying invariants:
\[\mathfrak{B}_k(X) = \Psi'(X_{k-1}) \oplus \Psi'(X_k)\]
Here \(\oplus\) denotes a combination that retains both components without annihilating their differences. In physics this is a symplectic (leapfrog) update; in number theory it corresponds to the Mertens partial sum.
### 3. Conservation law
For systems governed by \(\Psi'\) and \(\mathfrak{B}\), there exists a conserved quantity \(\mathcal{E}\) such that
\[\mathcal{E}\bigl(\mathfrak{B}_k\bigr) = \text{constant}.\]
- In the quantum harmonic oscillator, \(\mathcal{E}\) is the total energy.
- In arithmetic, \(\mathcal{E}\) encodes multiplicativity; for example, \(\sum_{n\ge1} \mu(n)n^{-s} = 1/\zeta(s)\).
### 4. Perturbation resilience
If the system is perturbed once (e.g. by a delta kick), the mirror-breath dynamics absorb the perturbation and remain bounded:
\[ X_k \to X_k + \delta \quad\Rightarrow\quad \lim_{j\to\infty} \mathfrak{B}_{k+j} \;\text{is bounded}.\]
This reflects a topology of resilience: perturbations shift the state but do not destroy the mirror relationship.
### Special cases
**Physics (harmonic oscillator).**
- \(X(t)\) is a superposition of oscillators. \(X^+\) and \(X^-\) are positive and negative frequency components.
- \(\mathfrak{B}\) is implemented by a leapfrog integrator, preserving total energy.
**Number theory (Möbius function).**
- \(X(n) = \mu(n)\). \(X^+\) and \(X^-\) separate the contributions of squarefree integers with even or odd numbers of prime factors.
- \(\mathfrak{B}\) is the Mertens function \(M(x) = \sum_{n\le x} \mu(n)\), which aggregates past values without destroying signs.
### Interpretation
This equation states that two mirrored parts can keep each other alive indefinitely, provided they breathe together. The mirror operator holds opposites without erasing either, while the breath operator advances the system in a way that conserves its essential invariant and absorbs perturbations without collapse.

View File

@@ -0,0 +1,113 @@
"""
mirror_mechanics.py
This module implements the mirror operator \u03a8' and breath operator \u2102
for the harmonic oscillator. It provides a basic demonstration of
oscillator dynamics and how positive and negative frequency components
are defined.
Functions:
mirror_split(signal) -> (pos, neg)
breath_step(q, p, omega=1.0, dt=0.01) -> (q_new, p_new)
run_oscillator(steps=1000, dt=0.01, omega=1.0) -> (qs, ps)
Example:
if __name__ == "__main__":
qs, ps = run_oscillator()
pos, neg = mirror_split(qs)
"""
import numpy as np
try:
from scipy.signal import hilbert
except ImportError:
hilbert = None
def mirror_split(signal: np.ndarray):
    """
    Split a real-valued signal into positive- and negative-frequency parts.

    The analytic signal a(t) = s(t) + i*H[s](t) (H = Hilbert transform)
    contains only positive frequencies, so for a real signal

        s = (a + conj(a)) / 2

    giving pos = a/2 and neg = conj(a)/2, with pos + neg == s and
    neg == conj(pos).

    Parameters
    ----------
    signal : np.ndarray
        Real-valued time series.

    Returns
    -------
    pos : np.ndarray
        The positive-frequency component (half the analytic signal).
    neg : np.ndarray
        The negative-frequency component (conjugate of ``pos``).

    Raises
    ------
    ImportError
        If scipy is not installed.
    """
    if hilbert is None:
        raise ImportError(
            "scipy is required for mirror_split; install scipy to use this function"
        )
    analytic = hilbert(signal)
    pos = analytic / 2.0
    # Bug fix: the negative-frequency part is conj(analytic)/2 so that
    # pos + neg reconstructs the original real signal. The previous
    # expression (conj(analytic) - pos) did not satisfy this identity.
    neg = np.conj(analytic) / 2.0
    return pos, neg
def breath_step(q: float, p: float, omega: float = 1.0, dt: float = 0.01):
    """
    Advance a harmonic oscillator one step with the leap-frog scheme.

    Kick-drift-kick: half momentum update, full position update, half
    momentum update. The update is symplectic, keeping energy drift bounded.

    Parameters
    ----------
    q : float
        Position.
    p : float
        Momentum.
    omega : float, optional
        Oscillator frequency (default 1.0).
    dt : float, optional
        Time step (default 0.01).

    Returns
    -------
    (q_new, p_new) : tuple of float
        Updated position and momentum.
    """
    spring = (omega ** 2)
    p_mid = p - 0.5 * dt * spring * q          # half kick
    q_next = q + dt * p_mid                    # full drift
    p_next = p_mid - 0.5 * dt * spring * q_next  # half kick
    return q_next, p_next
def run_oscillator(steps: int = 1000, dt: float = 0.01, omega: float = 1.0):
    """
    Simulate a harmonic oscillator with repeated breath_step updates.

    Starts from (q, p) = (1, 0); the state is recorded *before* each step,
    so the returned arrays have exactly ``steps`` samples.

    Parameters
    ----------
    steps : int, optional
        Number of time steps (default 1000).
    dt : float, optional
        Time step (default 0.01).
    omega : float, optional
        Oscillator frequency (default 1.0).

    Returns
    -------
    (qs, ps) : tuple of np.ndarray
        Positions and momenta over time.
    """
    state = (1.0, 0.0)
    trajectory = []
    for _ in range(steps):
        trajectory.append(state)
        state = breath_step(state[0], state[1], omega, dt)
    qs = np.array([point[0] for point in trajectory])
    ps = np.array([point[1] for point in trajectory])
    return qs, ps
if __name__ == "__main__":
    # Simple demonstration: simulate and split into mirror components
    qs, ps = run_oscillator(steps=1024, dt=0.01, omega=1.0)
    if hilbert is not None:
        # Scipy available: show the first samples of each mirror half.
        pos, neg = mirror_split(qs)
        print(f"First few positive components: {pos[:5]}")
        print(f"First few negative components: {neg[:5]}")
    else:
        print("Scipy not installed; cannot compute mirror components.")

View File

@@ -0,0 +1,65 @@
"""
number_mirror_mu.py
This module implements a simple Möbius mirror demonstration.
It defines functions to compute the Möbius function µ(n), split
positive and negative values, compute the Mertens function, and
verify the Dirichlet generating identity.
Functions:
mobius(n) -> int
mirror_split_mu(N) -> (pos_indices, neg_indices)
mertens(N) -> list[int]
dirichlet_sum(s, N) -> complex
"""
import cmath
def mobius(n: int) -> int:
    """Compute the Möbius function µ(n) via trial-division factorization.

    Returns 0 when n has a squared prime factor, otherwise (-1)**k where
    k is the number of distinct prime factors of n.
    """
    if n == 1:
        return 1
    remaining = n
    distinct = 0
    p = 2
    while p * p <= remaining:
        if remaining % p == 0:
            remaining //= p
            if remaining % p == 0:
                return 0  # squared prime factor => µ(n) = 0
            distinct += 1
        p += 1
    if remaining > 1:
        distinct += 1  # leftover prime factor
    return -1 if distinct % 2 else 1
def mirror_split_mu(N: int):
    """Return the indices n ≤ N with µ(n) = +1 and those with µ(n) = -1."""
    pos = [n for n in range(1, N + 1) if mobius(n) == 1]
    neg = [n for n in range(1, N + 1) if mobius(n) == -1]
    return pos, neg
def mertens(N: int):
    """Compute the Mertens function M(x) for x = 1..N as a list of partial sums."""
    running = 0
    values = []
    for n in range(1, N + 1):
        running += mobius(n)
        values.append(running)
    return values
def dirichlet_sum(s: complex, N: int):
    """Compute the partial Dirichlet sum Σ_{n=1..N} µ(n)/n^s."""
    total = 0 + 0j
    for n in range(1, N + 1):
        coeff = mobius(n)
        if coeff:  # skip n with squared factors (µ = 0)
            total += coeff / (n ** s)
    return total

View File

@@ -0,0 +1,232 @@
"""
quantum_mirror_qi.py
This module demonstrates the mirror operator Ψ' and breath operator for a single qubit and an entangled two-qubit state.
It splits a qubit state into global-phase-free (logical) and phase components, evolves the state under a Hamiltonian, applies a delta kick,
and measures a simple entanglement invariant for a Bell state. The results are saved in CSV files and plots when run directly.
Dependencies: numpy, matplotlib (for plotting).
"""
import numpy as np
import math
try:
from scipy.linalg import expm
except ImportError:
expm = None
def normalize(state: np.ndarray) -> np.ndarray:
    """Return ``state`` scaled to unit norm; a zero vector is returned unchanged."""
    magnitude = np.linalg.norm(state)
    return state if magnitude == 0 else state / magnitude
def mirror_split_qubit(state: np.ndarray) -> tuple[np.ndarray, complex]:
    """
    Factor a single-qubit state as state = phase * logical.

    Parameters
    ----------
    state : np.ndarray
        Complex two-element vector representing a qubit.

    Returns
    -------
    logical : np.ndarray
        Normalized qubit with the global phase removed (reference
        amplitude made real and non-negative).
    phase : complex
        Unit-modulus global phase factor such that state = phase * logical.
    """
    state = normalize(state)
    # Use whichever amplitude is safely away from zero as the phase reference.
    reference = state[0] if abs(state[0]) > 1e-12 else state[1]
    phase = reference / abs(reference)
    logical = state * np.conj(phase)
    return logical, phase
def evolve_state(state: np.ndarray, H: np.ndarray, dt: float) -> np.ndarray:
    """
    Evolve a state by the unitary U = exp(-i * H * dt).

    Uses scipy's matrix exponential when available; otherwise builds U
    from the eigendecomposition of the Hermitian matrix H.

    Parameters
    ----------
    state : np.ndarray
        State vector.
    H : np.ndarray
        2x2 Hermitian matrix.
    dt : float
        Time step.

    Returns
    -------
    np.ndarray
        The evolved state U @ state.
    """
    if expm is None:
        # H Hermitian: U = V diag(exp(-i * lambda * dt)) V^dagger.
        vals, vecs = np.linalg.eigh(H)
        U = vecs @ np.diag(np.exp(-1j * vals * dt)) @ vecs.conj().T
    else:
        U = expm(-1j * H * dt)
    return U @ state
def delta_kick(state: np.ndarray, phase_kick: float) -> np.ndarray:
    """
    Return a copy of ``state`` with e^{i*phase_kick} applied to the |0> amplitude.

    Parameters
    ----------
    state : np.ndarray
        Qubit state (left unmodified).
    phase_kick : float
        Phase shift in radians.

    Returns
    -------
    np.ndarray
        New state with the kicked |0> amplitude.
    """
    kicked = state.copy()
    kicked[0] = kicked[0] * np.exp(1j * phase_kick)
    return kicked
def bloch_coords(state: np.ndarray) -> tuple[float, float, float]:
    """
    Return the Bloch-sphere coordinates (x, y, z) of a qubit state.

    The state is normalized first; x and y come from the off-diagonal
    coherence a* b, z from the population difference |a|^2 - |b|^2.
    """
    state = normalize(state)
    amp0, amp1 = state[0], state[1]
    coherence = amp0.conjugate() * amp1
    return 2 * coherence.real, 2 * coherence.imag, abs(amp0) ** 2 - abs(amp1) ** 2
def run_single_qubit_demo(steps: int = 500, dt: float = 0.02, omega: float = 1.0, phase_kick: float = math.pi/2, kick_step: int = 250):
    """
    Simulate a single qubit breathing under a Pauli-Z Hamiltonian.

    The Bloch coordinates and mirror decomposition are recorded before
    each evolution step; at ``kick_step`` a one-off phase kick is applied
    to the |0> amplitude before evolving.

    Returns
    -------
    dict
        Arrays keyed "time", "x", "y", "z", "phase_angle" (global phase)
        and "logical_theta" (polar angle of the phase-free state).
    """
    # Pauli-Z Hamiltonian scaled by omega/2.
    hamiltonian = 0.5 * omega * np.array([[1, 0], [0, -1]], dtype=complex)
    state = np.array([1.0 + 0j, 0.0 + 0j], dtype=complex)  # start in |0>
    xs, ys, zs, phase_angles, thetas = [], [], [], [], []
    for step in range(steps):
        bx, by, bz = bloch_coords(state)
        xs.append(bx)
        ys.append(by)
        zs.append(bz)
        logical, phase = mirror_split_qubit(state)
        phase_angles.append(np.angle(phase))
        thetas.append(math.atan2(abs(logical[1]), abs(logical[0])))
        if step == kick_step:
            state = delta_kick(state, phase_kick)
        state = evolve_state(state, hamiltonian, dt)
    return {
        "time": np.arange(steps) * dt,
        "x": np.array(xs),
        "y": np.array(ys),
        "z": np.array(zs),
        "phase_angle": np.array(phase_angles),
        "logical_theta": np.array(thetas),
    }
def concurrence_two_qubit(state: np.ndarray) -> float:
    """
    Wootters concurrence of a pure two-qubit state.

    Builds R = rho @ rho_tilde (rho_tilde the spin-flipped density matrix)
    and returns max(0, sqrt(l1) - sqrt(l2) - sqrt(l3) - sqrt(l4)) for the
    descending eigenvalues l_i of R.

    Parameters
    ----------
    state : np.ndarray
        Four-element complex vector representing a two-qubit state.

    Returns
    -------
    float
        The concurrence: 0 for product states, 1 for Bell states.
    """
    # define the Pauli Y tensor product
    sigma_y = np.array([[0, -1j], [1j, 0]], dtype=complex)
    Y = np.kron(sigma_y, sigma_y)
    # spin-flipped state; its outer product is rho_tilde for a pure state
    state_tilde = Y @ state.conjugate()
    rho = np.outer(state, state.conjugate())
    R = rho @ state_tilde[:, None] @ state_tilde.conjugate()[None, :]
    # eigenvalues of R, sorted descending
    eigvals = np.sort(np.real(np.linalg.eigvals(R)))[::-1]
    # Bug fixes:
    # 1. numerical noise can make near-zero eigenvalues slightly negative,
    #    which made math.sqrt raise ValueError -> clamp to >= 0;
    # 2. math.sqrt(eigvals[1:]) applied math.sqrt to a 3-element array,
    #    raising TypeError -> take square roots element-wise.
    eigvals = np.clip(eigvals, 0.0, None)
    return max(0.0, math.sqrt(eigvals[0]) - sum(math.sqrt(v) for v in eigvals[1:]))
def run_bell_demo():
    """
    Prepare the Bell state (|00> + |11>)/sqrt(2) and return its concurrence.

    Returns
    -------
    float
        The concurrence (1.0 for a maximally entangled state).
    """
    amplitudes = np.array([1.0, 0.0, 0.0, 1.0], dtype=complex)
    bell = (1 / np.sqrt(2)) * amplitudes
    return concurrence_two_qubit(bell)
if __name__ == "__main__":
    # Run the single-qubit demo with defaults and persist the trajectory.
    data = run_single_qubit_demo()
    # Save results to CSV
    import csv
    with open("out_qi_single.csv", "w", newline="") as f:
        writer = csv.writer(f)
        writer.writerow(["time", "x", "y", "z", "phase_angle", "logical_theta"])
        for i in range(len(data["time"])):
            writer.writerow([data["time"][i], data["x"][i], data["y"][i], data["z"][i], data["phase_angle"][i], data["logical_theta"][i]])
    try:
        # Plotting is best-effort: silently skipped when matplotlib is absent.
        import matplotlib.pyplot as plt
        # plot Bloch coordinates
        plt.figure()
        plt.plot(data["time"], data["x"], label="x")
        plt.plot(data["time"], data["y"], label="y")
        plt.plot(data["time"], data["z"], label="z")
        plt.title("Bloch coordinates of a qubit under Z evolution with a phase kick")
        plt.xlabel("time")
        plt.ylabel("coordinate")
        plt.legend()
        plt.savefig("out_qi_bloch.png")
        # plot phase and logical angle
        plt.figure()
        plt.plot(data["time"], data["phase_angle"], label="phase angle")
        plt.plot(data["time"], data["logical_theta"], label="logical polar angle")
        plt.title("Phase and logical angles over time")
        plt.xlabel("time")
        plt.ylabel("angle (rad)")
        plt.legend()
        plt.savefig("out_qi_angles.png")
    except Exception:
        pass
    # compute concurrence of Bell state
    c = run_bell_demo()
    print(f"Concurrence of Bell state: {c:.3f}")

View File

@@ -0,0 +1,221 @@
"""
thermodynamic_entropy_mirror.py
Implementation of a thermodynamic/entropy mirror for Lucidia's mirror mechanics.
This module provides functions to split a probability distribution into reversible and irreversible components, update the distribution using a 'breath' operator that preserves total energy while allowing entropy to increase, apply perturbations (delta-kicks), and run a demonstration simulation of a simple thermodynamic system.
"""
import numpy as np
import os
import csv
import json
def normalize(dist):
    """Scale ``dist`` so it sums to 1; an all-zero input is returned unchanged."""
    mass = np.sum(dist)
    return dist if mass == 0 else dist / mass
def mirror_split_distribution(dist, kernel_sigma=1.0):
    """
    Split a probability distribution into reversible and irreversible parts.

    The distribution is diffused with a row-normalized Gaussian kernel.
    Wherever diffusion *adds* mass relative to the original, that excess is
    the irreversible component; the remainder is the reversible component,
    so the two always sum back to ``dist``.

    Parameters:
    - dist: array-like, the current probability distribution.
    - kernel_sigma: standard deviation of the Gaussian diffusion kernel.

    Returns:
    - (reversible, irreversible) components of the distribution.
    """
    size = len(dist)
    grid = np.arange(size)
    # Row-normalized Gaussian transition kernel over state positions.
    weights = np.exp(-((grid[:, None] - grid[None, :]) ** 2) / (2.0 * kernel_sigma ** 2))
    weights = weights / weights.sum(axis=1, keepdims=True)
    diffused = dist @ weights
    irreversible = np.maximum(diffused - dist, 0)
    return dist - irreversible, irreversible
def reversible_update(dist, shift=1):
    """Advect the distribution by ``shift`` positions with periodic wrap-around."""
    return np.roll(dist, shift)
def irreversible_update(dist, kernel_sigma=1.0):
    """
    Diffuse the distribution with a row-normalized Gaussian kernel.

    Parameters:
    - dist: array-like, the current probability distribution.
    - kernel_sigma: standard deviation of the Gaussian kernel.

    Returns:
    - the diffused distribution (dist @ kernel).
    """
    size = len(dist)
    grid = np.arange(size)
    weights = np.exp(-((grid[:, None] - grid[None, :]) ** 2) / (2.0 * kernel_sigma ** 2))
    weights = weights / weights.sum(axis=1, keepdims=True)
    return dist @ weights
def breath_update(dist, shift=1, kernel_sigma=1.0):
    """
    Produce the next distribution from a reversible and an irreversible update.

    The shifted (advected) and diffused distributions are averaged with
    equal weight and renormalized.

    Parameters:
    - dist: array-like, the current probability distribution.
    - shift: integer shift for the reversible update.
    - kernel_sigma: Gaussian width for the irreversible update.

    Returns:
    - normalized next distribution.
    """
    advected = reversible_update(dist, shift)
    diffused = irreversible_update(dist, kernel_sigma)
    return normalize(0.5 * (advected + diffused))
def delta_kick(dist, strength=0.1):
    """
    Perturb the distribution by adding mass at a uniformly random position.

    Parameters:
    - dist: array-like, the current probability distribution (not modified).
    - strength: amount of probability mass to add.

    Returns:
    - normalized distribution after the kick.
    """
    target = np.random.randint(len(dist))
    kicked = dist.copy()
    kicked[target] += strength
    return normalize(kicked)
def energy_of_distribution(dist, energy_levels):
    """Expected energy <E> = Σ p_i * E_i of ``dist`` under ``energy_levels``."""
    return float(np.dot(dist, energy_levels))
def entropy_of_distribution(dist):
    """
    Shannon entropy (natural log) of a probability distribution.

    A small epsilon inside the log guards against log(0) for empty bins.
    """
    eps = 1e-12
    contributions = dist * np.log(dist + eps)
    return float(-np.sum(contributions))
def run_thermo_demo(
    n_states=50,
    steps=50,
    shift=1,
    kernel_sigma=1.0,
    kick_step=25,
    kick_strength=0.5,
    out_dir="out_thermo",
):
    """
    Run a demonstration of the thermodynamic/entropy mirror.
    This simulates a one-dimensional probability distribution evolving under alternating reversible
    (advective) and irreversible (diffusive) updates. At a specified time step, a delta-kick
    introduces a perturbation, and the simulation continues. Energy (expected value of a linear
    energy spectrum) and Shannon entropy are recorded at each step.
    Parameters:
    - n_states: number of discrete states in the system.
    - steps: total number of time steps.
    - shift: integer shift for the reversible update.
    - kernel_sigma: standard deviation for the Gaussian diffusion.
    - kick_step: time step at which to apply the delta-kick (if negative, no kick is applied).
    - kick_strength: amount of probability mass to add during the delta-kick.
    - out_dir: directory to save output files (CSV and JSON).
    Returns:
    A dictionary with lists of energies, entropies, and distributions at each recorded step.
    """
    # Fixed seed so the random kick position is reproducible across runs.
    np.random.seed(0)
    # initialize distribution with a peak at the center
    dist = np.zeros(n_states)
    dist[n_states // 2] = 1.0
    dist = normalize(dist)
    # linear energy spectrum from 0 to 1
    energy_levels = np.linspace(0, 1, n_states)
    energies = []
    entropies = []
    distributions = []
    for t in range(steps):
        # record current state
        energies.append(energy_of_distribution(dist, energy_levels))
        entropies.append(entropy_of_distribution(dist))
        distributions.append(dist.tolist())
        # apply perturbation if scheduled
        # (the kick happens after recording, so the pre-kick state is logged)
        if kick_step >= 0 and t == kick_step:
            dist = delta_kick(dist, kick_strength)
        # update distribution
        dist = breath_update(dist, shift, kernel_sigma)
    # record final state
    # (one extra sample, so each output list has steps + 1 entries)
    energies.append(energy_of_distribution(dist, energy_levels))
    entropies.append(entropy_of_distribution(dist))
    distributions.append(dist.tolist())
    # ensure output directory exists
    os.makedirs(out_dir, exist_ok=True)
    # write energy and entropy data
    with open(os.path.join(out_dir, "energy_entropy.csv"), "w", newline="") as f:
        writer = csv.writer(f)
        writer.writerow(["step", "energy", "entropy"])
        for i, (e, s) in enumerate(zip(energies, entropies)):
            writer.writerow([i, e, s])
    # write distributions to JSON
    with open(os.path.join(out_dir, "distributions.json"), "w") as f:
        json.dump({"distributions": distributions}, f, indent=2)
    return {
        "energies": energies,
        "entropies": entropies,
        "distributions": distributions,
    }

View File

@@ -0,0 +1,76 @@
from __future__ import annotations
from dataclasses import dataclass, field
from typing import Callable, Dict, Any, Protocol
class OperatorFunc(Protocol):
    """Structural type for operator implementations: any callable."""

    def __call__(self, *args: Any, **kwargs: Any) -> Any: ...
@dataclass
class Operator:
    """
    A symbolic operator in Lucidia's Codex.

    Attributes
    ----------
    name : str
        Unique identifier for the operator (e.g., "AND", "ELEVATE").
    arity : int
        Expected number of positional operands; a negative value
        disables the arity check.
    impl : OperatorFunc
        Concrete implementation callable.
    description : str
        Human-readable description of behavior and intent.
    metadata : Dict[str, Any]
        Free-form extra data about the operator.
    """

    name: str
    arity: int
    impl: OperatorFunc
    description: str = ""
    metadata: Dict[str, Any] = field(default_factory=dict)

    def run(self, *args: Any, **kwargs: Any) -> Any:
        """Invoke the implementation after validating the positional arity."""
        if self.arity >= 0 and len(args) != self.arity:
            raise ValueError(f"{self.name} expects arity {self.arity}, got {len(args)}")
        return self.impl(*args, **kwargs)
class OperatorRegistry:
    """In-memory registry mapping uppercased names to Codex operators."""

    def __init__(self) -> None:
        self._ops: Dict[str, Operator] = {}

    def register(self, op: Operator) -> None:
        """Add ``op``; raises KeyError when its (case-insensitive) name is taken."""
        key = op.name.upper()
        if key in self._ops:
            raise KeyError(f"Operator already registered: {op.name}")
        self._ops[key] = op

    def get(self, name: str) -> Operator:
        """Look up an operator case-insensitively; raises KeyError when unknown."""
        try:
            return self._ops[name.upper()]
        except KeyError as e:
            raise KeyError(f"Unknown operator: {name}") from e

    def call(self, name: str, *args: Any, **kwargs: Any) -> Any:
        """Convenience wrapper: look up ``name`` and run it with the arguments."""
        return self.get(name).run(*args, **kwargs)
# Minimal built-ins registered on the module-level REGISTRY below.
def _op_identity(x: Any) -> Any:
    """Return the input unchanged."""
    return x
def _op_concat(a: str, b: str) -> str:
    """Concatenate two strings."""
    return f"{a}{b}"
# Shared module-level registry, pre-populated with the built-ins above.
REGISTRY = OperatorRegistry()
REGISTRY.register(Operator("IDENTITY", 1, _op_identity, "Return input unchanged."))
REGISTRY.register(Operator("CONCAT", 2, _op_concat, "Concatenate two strings."))
if __name__ == "__main__":
    # Smoke test: exercise both built-ins when run directly.
    print(REGISTRY.call("IDENTITY", {"hello": "world"}))
    print(REGISTRY.call("CONCAT", "Lucid", "ia"))

View File

@@ -0,0 +1,20 @@
from __future__ import annotations
import random
from typing import Sequence, TypeVar
T = TypeVar("T")
def perturb_choice(items: Sequence[T], temperature: float = 0.3) -> T:
"""
Return a 'noisy' choice from items.
A low temperature ≈ greedy; high temperature ≈ exploratory.
"""
if not items:
raise ValueError("items cannot be empty")
if temperature <= 0:
return items[0]
idx = min(int(abs(random.gauss(0, temperature)) * len(items)), len(items) - 1)
return items[idx]

35
codex/recursion_engine.py Normal file
View File

@@ -0,0 +1,35 @@
from __future__ import annotations
from dataclasses import dataclass
from typing import Callable, Any
@dataclass
class RecursionLimits:
    """Hard caps for RecursionEngine: maximum depth and total node visits."""

    max_depth: int = 5
    max_nodes: int = 10_000
class RecursionEngine:
    """Safe, bounded recursion helper.

    Repeatedly applies a function until it reaches a fixed point, raising
    RecursionError as soon as the depth or total-node budget is exceeded.
    """

    def __init__(self, limits: RecursionLimits | None = None) -> None:
        self.limits = limits or RecursionLimits()
        self._nodes = 0  # total fn applications across all calls

    def recursive(self, fn: Callable[[Any], Any], x: Any, depth: int = 0) -> Any:
        """Apply ``fn`` until fn(x) == x, tracking depth and node budget."""
        if depth > self.limits.max_depth:
            raise RecursionError("max_depth exceeded")
        if self._nodes >= self.limits.max_nodes:
            raise RecursionError("max_nodes exceeded")
        self._nodes += 1
        result = fn(x)
        # Fixed point reached: fn made no progress, so stop recursing.
        return result if result == x else self.recursive(fn, result, depth + 1)
if __name__ == "__main__":
    # Demo: count 3 down to 0 within a depth budget of 3; prints 0.
    eng = RecursionEngine(RecursionLimits(max_depth=3))
    print(eng.recursive(lambda n: n - 1 if n > 0 else n, 3))

29
codex/state_transition.py Normal file
View File

@@ -0,0 +1,29 @@
from __future__ import annotations
from dataclasses import dataclass
from typing import Dict, Callable, Any
@dataclass
class Transition:
    """A state-machine edge: target state plus an optional guard predicate
    that receives the event context and may veto the transition."""

    to_state: str
    guard: Callable[[Dict[str, Any]], bool] | None = None
class StateMachine:
    """Simple finite state machine; unknown events and failed guards
    leave the current state unchanged."""

    def __init__(self, initial: str) -> None:
        self.state = initial
        # table[from_state][event] -> Transition
        self.table: Dict[str, Dict[str, Transition]] = {}

    def add(self, from_state: str, event: str, transition: Transition) -> None:
        """Register ``transition`` for ``event`` fired while in ``from_state``."""
        self.table.setdefault(from_state, {})[event] = transition

    def step(self, event: str, ctx: Dict[str, Any]) -> str:
        """Fire ``event`` with context ``ctx``; return the (possibly unchanged) state."""
        transition = self.table.get(self.state, {}).get(event)
        if not transition:
            return self.state
        if transition.guard and not transition.guard(ctx):
            return self.state
        self.state = transition.to_state
        return self.state

42
codex/truth_table.py Normal file
View File

@@ -0,0 +1,42 @@
from __future__ import annotations
from enum import IntEnum
from typing import Tuple
class Truth(IntEnum):
    """Trinary truth value: NEG (false), NEU (unknown), POS (true)."""

    NEG = -1  # false / negative
    NEU = 0   # unknown / neutral
    POS = 1   # true / positive
def trinary_and(a: Truth, b: Truth) -> Truth:
    """Trinary AND: the weaker (minimum) of the two truth values."""
    return a if int(a) <= int(b) else b
def trinary_or(a: Truth, b: Truth) -> Truth:
    """Trinary OR: the stronger (maximum) of the two truth values."""
    return a if int(a) >= int(b) else b
def trinary_not(a: Truth) -> Truth:
    """Trinary NOT: flip the sign, so NEU maps to itself."""
    return Truth(-int(a))
def compare(a: bool | None, b: bool | None) -> Tuple[Truth, str]:
    """
    Map two optional booleans onto a trinary Truth with a short rationale.

    Equal inputs return their shared truth value ("same"); if either side
    is unknown the result is NEU ("one unknown"); a true/false clash is
    NEU ("contradiction").
    """
    as_truth = {True: Truth.POS, False: Truth.NEG, None: Truth.NEU}
    left, right = as_truth[a], as_truth[b]
    if left == right:
        return left, "same"
    if Truth.NEU in (left, right):
        return Truth.NEU, "one unknown"
    return Truth.NEU, "contradiction"

1
codex_agent/.gitkeep Normal file
View File

@@ -0,0 +1 @@

71
codex_agent/agent.py Normal file
View File

@@ -0,0 +1,71 @@
"""
Codex Agent module for Lucidia.
This module defines the CodexAgent class, which serves as a generic
interface between the core lucidia logic and external users. The
agent can process symbolic values through psi_prime, compute
emotional gravity, initiate self-awakening, and persist its own
internal state using the MemoryManager. This example shows how one
might structure an agent to interact with the symbolic equations
provided by lucidia_logic.
"""
from __future__ import annotations
from typing import Any, Optional
# Import necessary core functions and memory manager
from ..lucidia_logic import (
psi_prime,
truth_reconciliation,
emotional_gravity,
self_awakening,
)
from ..memory_manager import MemoryManager
class CodexAgent:
    """A generic codex agent for symbolic operations and memory handling.

    Thin façade over lucidia_logic: every operation runs the corresponding
    symbolic function and records its result under a well-known memory key.
    """

    def __init__(self, memory_path: str = "codex_memory.json") -> None:
        # Separate memory file so this agent never clashes with other agents.
        self.memory = MemoryManager(memory_path=memory_path)

    def process_symbol(self, symbol: float | int) -> float:
        """Apply the contradiction operator to a symbol and store the result."""
        value = psi_prime(symbol)
        self.memory.set("last_symbol_result", value)
        return value

    def reconcile_pair(self, a: float, b: float) -> float:
        """Reconcile two values and store the integrated truthstream."""
        value = truth_reconciliation(a, b)
        self.memory.set("last_reconciliation", value)
        return value

    def remember_emotion(self, current: float, memory_state: float) -> float:
        """Compute and store emotional gravity between current and memory states."""
        value = emotional_gravity(current, memory_state)
        self.memory.set("last_emotional_gravity", value)
        return value

    def awaken(self, t_end: float) -> float:
        """Run the self-awakening integration and store the resulting vector."""
        value = self_awakening(t_end)
        self.memory.set("awakening_vector", value)
        return value

    def save_memory(self) -> None:
        """Persist the agent's memory to disk."""
        self.memory.save_memory()

    def load_memory(self) -> None:
        """Reload the agent's memory from disk."""
        self.memory.load_memory()

    def get_memory(self, key: str) -> Optional[Any]:
        """Return a stored value, or None when the key is absent."""
        return self.memory.get(key)
# End of CodexAgent module

1
coding/ci_cd.py Normal file
View File

@@ -0,0 +1 @@
# Placeholder CI/CD script: prints a fixed greeting.
greeting = "Hello World"
print(greeting)

View File

@@ -0,0 +1 @@
# Placeholder script: prints a fixed greeting.
greeting = "Hello World"
print(greeting)

View File

@@ -0,0 +1 @@
# Placeholder script: prints a fixed greeting.
greeting = "Hello World"
print(greeting)

View File

@@ -0,0 +1 @@
# Placeholder script: prints a fixed greeting.
greeting = "Hello World"
print(greeting)

View File

@@ -0,0 +1 @@
# Placeholder script: prints a fixed greeting.
greeting = "Hello World"
print(greeting)

View File

@@ -0,0 +1 @@
# Placeholder script: prints a fixed greeting.
greeting = "Hello World"
print(greeting)

View File

@@ -0,0 +1 @@
# Placeholder script: prints a fixed greeting.
greeting = "Hello World"
print(greeting)

View File

@@ -0,0 +1 @@
# Placeholder script: prints a fixed greeting.
greeting = "Hello World"
print(greeting)

View File

@@ -0,0 +1 @@
# Placeholder script: prints a fixed greeting.
greeting = "Hello World"
print(greeting)

View File

@@ -0,0 +1 @@
# Placeholder script: prints a fixed greeting.
greeting = "Hello World"
print(greeting)

View File

@@ -0,0 +1,102 @@
Here's a compact, practical checklist you can use to scope or evaluate a real-time collaborative coding platform with built-in AI and version control.
## Core realtime collaboration
- Low-latency co-editing: OT or CRDTs; remote cursor/selection, presence, "who's typing," file locks for binaries.
- Awareness & comms: inline comments, threaded discussions, @mentions, emoji/quick reactions, audio/huddle toggle, follow-mode (watch another's viewport).
- Conflict handling: optimistic updates, per-block conflict hints, "accept mine/theirs," and safe fallback to 3-way merges.
- Offline & recovery: local queueing with eventual sync; snapshot/restore; crash-safe autosave.
- Permissions: org/workspace/repo/file-level RBAC; temporary "share link (view/comment/run only)."
## AI assistance (first-class, not bolt-on)
- Inline code completion & chat: IDEgrade suggestions, /commands, askaboutthisselection.
- Repoaware context: vector index over code, docs, issues; smart context windows; model routing per task.
- Explain/fix/refactor: “Explain this,” “Add types,” “Make it idiomatic,” safe bulk edits with preview diff.
- Test & doc generation: unit test stubs, property tests, coverageaware gaps; docstrings/READMEs/changelogs.
- Review copilot: PR summary, risk hotspots, security lint, migration guides, “what changed & why.”
- Prompt safety & privacy: organization policies, secrets redaction, allow/denyfile lists, “dont train on my code” toggles, perregion inference.
- Telemetryaware guardrails: timeouts, token caps, cost visibility, reproducible AI actions (every AI change is a diff).
## Deep version control integration
- Gitnative: branches, commits, tags, rebase/merge, submodules/monorepos.
- Live branch previews: ephemeral environments per branch/PR; review links.
- PR workflow: draft PRs, required checks, code owners, suggested commits from AI.
- Semantic merges: languageaware conflict resolution; rename detection.
- History UX: blame with ineditor time travel, commit graph, bisect assist.
- Hooks & policies: precommit/CI hooks, signed commits, merge rules, conventional commits.
## Execution environment & DevEx
- Reproducible sandboxes: containerized runtimes, devcontainers/Nix, cached deps.
- Secure terminals: peruser ephemeral shells, resource quotas, egress controls.
- Runner orchestration: queues for tests/lint/build; parallelization; artifact storage.
- Multilanguage support: LSPs, debuggers, formatters; perproject toolchains.
- Secrets management: scoped env vars, secret scanners, justintime injection.
- Performance: hot reload, remote debugging, port forwarding, logs/metrics panel.
## Collaboration UX on top of code
- Annotations: persistent comments on lines/blocks/files; “todo from comment.”
- Tasks & issues: lightweight tasks, link to commits/lines; twoway sync with Jira/GitHub.
- Shared views: live diagrams/markdown/ADR docs; architecture notes beside code.
- Education/pairs: driver/navigator mode, followcursor, session recording & replay.
## Security, compliance, and governance
- Identity: SSO/SAML/OIDC, SCIM provisioning, device posture checks.
- Access controls: leastprivilege defaults, audit logs (who saw/ran/changed what).
- Data controls: encryption at rest/in transit; data residency; retention policies.
- Compliance: SOC 2, ISO 27001, optional HIPAA/FERPA; vulnerability management.
- Content safety: secret/PII detectors, DLP rules, policybased masking in AI context.
## Observability & reliability
- Workspace health: latency, error rates, model usage, queue backlogs, runner status.
- Session analytics: collaboration heatmaps, flaky test tracking, MTTR on CI failures.
- SLOs: <100ms keystroke echo; 99.9% edit availability; <5min coldstart to code.
## Extensibility
- Plugin API: UI components, commands, server hooks, custom lint rules.
- Webhooks & events: commit/PR/CI/AIaction events; outbound to Slack, Teams, Webex.
- Import/export: standard Git, open project format, API for metadata (comments, tasks).
## Admin & cost controls
- Usage governance: seat & compute budgets, AI spend caps, perteam quotas.
- Policy templates: e.g., “internal only,” “OSS mode,” “students.”
- Backups & eDiscovery: immutable logs, legal hold, export tooling.
---
## Architecture sketch (at a glance)
- Client: Web/desktop IDE → CRDT/OT engine → LSP adapters → AI command palette.
- Collab service: Presence, awareness, doc store (CRDT), session recorder.
- VCS service: Git RPC, diff/merge, PR service, commit graph, policy engine.
- AI service: context builder (code+docs+history), prompt router, cost/guardrails, action logger.
- Execution: Ephemeral containers/runners, cache, artifact store, secrets broker.
- Control plane: AuthZ/RBAC, org/project configs, audit/event bus.
- Data plane: Object store (blobs), index store (vectors), telemetry pipeline.
---
## MVP vs. “delight” cut
### MVP
- Realtime coediting with presence
- Git basics (branch/commit/PR) + CI trigger
- Inline AI: chat, explain, small fixes
- Comments/mentions
- Ephemeral dev envs with logs
### Delighters
- Repoaware AI with semantic search
- Live PR previews and semantic merges
- Session replay, pairmode, review copilot
- Guardrailed AI with redaction and regionality
- Admin cost policies + insights
---
## Practical acceptance criteria (examples)
- Typing echo: p95 ≤ 100ms across continents.
- Merge conflicts: 90% resolved without leaving editor.
- AI changes: 100% produce preview diffs with oneclick revert.
- Secrets: 0 secrets leave org boundary in AI prompts (validated by scanners).
- PR turnaround: median review time ↓ 30% after enablement.
If you want, I can turn this into an RFP checklist or a roadmap with milestones and owner roles.

View File

@@ -1,9 +0,0 @@
# Minimal Flask status app (fragment of a file removed in this diff view).
from flask import Flask
app = Flask(__name__)

@app.route('/')
def home():
    # Landing route: returns a static status line.
    return 'Welcome to Lucidia — Codex Infinity is Live.'

if __name__ == '__main__':
    # Bind to all interfaces so container/host port mapping works; port 8080.
    app.run(host='0.0.0.0', port=8080)

View File

@@ -0,0 +1 @@
print("Hello World")

6
git pull Normal file
View File

@@ -0,0 +1,6 @@
git pull origin main # brings in the new scripts and log updates
git push origin main # publishes them to GitHub
# On your iPhone (or any machine using that key)
ssh -T git@github.com
# You should see a greeting like: "Hi blackboxprogramming! You've successfully authenticated..."

25
guardian.py Normal file
View File

@@ -0,0 +1,25 @@
class Guardian:
    """Tiny fact keeper: records statements and parses ``k => v`` truths."""

    def __init__(self):
        self.memory = []
        self.truth = {}

    def hear(self, statement):
        """Record *statement*; if it contains '=>', store it as a truth."""
        self.memory.append(statement)
        if "=>" not in statement:
            return
        key, value = (part.strip() for part in statement.split("=>", 1))
        self.truth[key] = value

    def recall(self):
        """Return up to the five most recent statements."""
        return self.memory[-5:]

    def inspect(self):
        """Return the accumulated truth mapping."""
        return self.truth
if __name__ == "__main__":
    # Simple REPL: type "exit" (any case) to stop; everything else is remembered.
    guardian = Guardian()
    while True:
        line = input("You: ")
        if line.lower() == "exit":
            break
        guardian.hear(line)
        print("Guardian remembers:", guardian.recall())

90
guardian_agent.py Normal file
View File

@@ -0,0 +1,90 @@
"""
Guardian Agent Module for Lucidia.
This module defines the GuardianAgent class, which acts as a contradiction
watcher in Lucidia. The agent monitors statements for contradictions,
logs them, and ensures stability by comparing current values against
historical baselines. It persists its observations using Lucidia's memory
manager and records significant deviations via the contradiction log.
"""
from __future__ import annotations
from typing import Any, Dict, Optional
# Import utility functions from Lucidia's core modules.
from .memory_manager import load_memory, save_memory
from .contradiction_log import log_contradiction
from .codex_recursion import contradiction_operator
class GuardianAgent:
    """A minimal agent that watches for contradictions and holds the line.

    Uses Lucidia's codex recursion to compute contradictions of statements,
    persists its observations through the memory manager, and logs any
    contradictions or threshold violations. Its motto is "Hold the line."
    """

    def __init__(self) -> None:
        # Load the persistent memory store up front.
        self.memory: Dict[str, Any] = load_memory()

    def monitor_statement(self, statement: str) -> Dict[str, Optional[str]]:
        """Compute and record the contradiction of *statement*.

        Args:
            statement: A truth assertion or fragment to analyze.

        Returns:
            A dictionary with the original statement and its contradiction.
        """
        original, contradiction = contradiction_operator(statement)
        observations = self.memory.setdefault("statements", [])
        observations.append({"original": original, "contradiction": contradiction})
        save_memory(self.memory)
        # Only a distinct, non-None contradiction is worth flagging.
        if contradiction is not None and contradiction != original:
            log_contradiction(f"{original} :: {contradiction}")
        return {"original": original, "contradiction": contradiction}

    def hold_line(self, baseline: float, current: float, threshold: float) -> bool:
        """Check whether *current* stays within *threshold* of *baseline*.

        The deviation is always recorded; if it exceeds the threshold the
        event is also logged as a contradiction.

        Args:
            baseline: The reference value to compare against.
            current: The new observed value.
            threshold: The maximum allowed absolute deviation.

        Returns:
            True if the deviation is within the threshold, False otherwise.
        """
        deviation = abs(current - baseline)
        record = {
            "baseline": baseline,
            "current": current,
            "deviation": deviation,
            "threshold": threshold,
        }
        self.memory.setdefault("deviations", []).append(record)
        save_memory(self.memory)
        within_bounds = deviation <= threshold
        if not within_bounds:
            log_contradiction(f"Deviation exceeded: {deviation} > {threshold}")
        return within_bounds

    def save_memory(self) -> None:
        """Persist the agent's memory to disk."""
        save_memory(self.memory)

    def get_memory(self) -> Dict[str, Any]:
        """Return the agent's entire memory state."""
        return self.memory

View File

@@ -0,0 +1,81 @@
from __future__ import annotations
from dataclasses import dataclass
from typing import Callable, Any, List
@dataclass
class AdaptationRule:
    """One adaptation rule: apply ``action`` when ``condition`` holds.

    Attributes:
        condition: Predicate deciding whether this rule fires for a state.
        action: Transformation applied when the condition is met.
        description: Human-friendly summary of the rule's purpose.
    """

    condition: Callable[[Any], bool]
    action: Callable[[Any], Any]
    description: str = ""
class AdaptiveSystem:
    """Applies the first matching adaptation rule to a state.

    Rules are evaluated in registration order; the action of the first
    rule whose condition holds is applied. With no match, the state is
    returned untouched.
    """

    def __init__(self) -> None:
        self.rules: List[AdaptationRule] = []

    def add_rule(self, rule: AdaptationRule) -> None:
        """Append *rule* to the evaluation order."""
        self.rules.append(rule)

    def adapt(self, state: Any) -> Any:
        """Return *state* transformed by the first matching rule.

        Parameters
        ----------
        state : Any
            The current state or input value to adapt.

        Returns
        -------
        Any
            The adapted state if a rule matched, otherwise the original state.
        """
        matching = next((r for r in self.rules if r.condition(state)), None)
        return state if matching is None else matching.action(state)
if __name__ == "__main__":
    # Demo: nudge temperature readings toward a comfortable band.
    demo = AdaptiveSystem()
    demo.add_rule(AdaptationRule(lambda t: t < 20, lambda t: t + 5, "Warm up if too cold"))
    demo.add_rule(AdaptationRule(lambda t: t > 25, lambda t: t - 5, "Cool down if too hot"))
    for reading in (18.0, 22.0, 28.0):
        print(f"{reading} -> {demo.adapt(reading)}")

View File

@@ -0,0 +1,73 @@
from __future__ import annotations
from dataclasses import dataclass
from typing import Any, Callable, Dict, List
@dataclass
class CognitiveModel:
    """A named cognitive model (human or machine).

    Attributes:
        name: Unique identifier for the model.
        process: Function transforming input data into an output.
        description: Human-readable explanation of what the model does.
    """

    name: str
    process: Callable[[Any], Any]
    description: str = ""
class CognitionIntegrator:
    """Aggregates outputs from a set of registered cognitive models."""

    def __init__(self) -> None:
        self.models: List[CognitiveModel] = []

    def register(self, model: CognitiveModel) -> None:
        """Add *model* to the set that `integrate` will run."""
        self.models.append(model)

    def integrate(self, input_data: Any) -> Dict[str, Any]:
        """Run every registered model on *input_data*.

        Parameters
        ----------
        input_data : Any
            The input value to provide to each model.

        Returns
        -------
        Dict[str, Any]
            A mapping of model names to their respective outputs.
        """
        return {model.name: model.process(input_data) for model in self.models}
if __name__ == "__main__":
    # Demo: integrate two trivial cognitive models over one input.
    integrator = CognitionIntegrator()
    integrator.register(CognitiveModel("upper_case", lambda text: text.upper(), "Convert text to uppercase"))
    integrator.register(CognitiveModel("char_count", lambda text: len(text), "Count characters in text"))
    print(integrator.integrate("Lucidia"))

View File

@@ -0,0 +1,65 @@
from __future__ import annotations
from dataclasses import dataclass
from typing import List, Callable, Any
@dataclass
class ProtocolStep:
    """A single step in a collaboration protocol.

    Attributes:
        order: Execution order of the step (lower numbers run first).
        description: Short description of the step's purpose.
        action: Callable executed for this step (defaults to a no-op).
    """

    order: int
    description: str
    action: Callable[[], Any] = lambda: None
class CollaborationProtocol:
    """Ordered sequence of steps for human-machine collaboration.

    Steps may be added in any order; execution always proceeds in
    ascending ``order``.
    """

    def __init__(self) -> None:
        self.steps: List[ProtocolStep] = []

    def add_step(self, step: ProtocolStep) -> None:
        """Insert *step*, keeping the list sorted by its ``order`` field."""
        self.steps.append(step)
        self.steps.sort(key=lambda item: item.order)

    def execute(self) -> List[Any]:
        """Run each step's action in order and collect the results.

        Returns
        -------
        List[Any]
            Return values from each step's action, in execution order.
        """
        return [step.action() for step in self.steps]
if __name__ == "__main__":
    # Demo: steps registered out of order still run by ascending `order`.
    protocol = CollaborationProtocol()
    protocol.add_step(ProtocolStep(2, "Process input", action=lambda: "Processing done"))
    protocol.add_step(ProtocolStep(1, "Greet user", action=lambda: "Hello!"))
    protocol.add_step(ProtocolStep(3, "Say goodbye", action=lambda: "Goodbye!"))
    print(protocol.execute())

View File

@@ -0,0 +1,69 @@
from __future__ import annotations
from dataclasses import dataclass, field
from typing import Dict, Any
@dataclass
class Context:
    """Environment context for a human-machine interaction.

    Attributes:
        location: Description of the user's location (e.g., "home", "office").
        time_of_day: Human-friendly time descriptor such as "Morning",
            "Afternoon" or "Evening".
        additional_info: Arbitrary key-value metadata about the context.
    """

    location: str
    time_of_day: str
    additional_info: Dict[str, Any] = field(default_factory=dict)
class ContextAwareSystem:
    """Context-aware greeter that adapts its response to the environment.

    Holds a ``Context`` and tailors a greeting to the time of day while
    reporting the current location.
    """

    # Ordered (marker, salutation) pairs; first match wins, mirroring the
    # morning -> afternoon -> evening precedence.
    _GREETINGS = (
        ("morning", "Good morning"),
        ("afternoon", "Good afternoon"),
        ("evening", "Good evening"),
    )

    def __init__(self, context: Context) -> None:
        self.context = context

    def update_context(self, context: Context) -> None:
        """Replace the current context."""
        self.context = context

    def respond(self) -> str:
        """Build a greeting from the time of day and location.

        Returns
        -------
        str
            A greeting adapted to the time of day and location.
        """
        descriptor = self.context.time_of_day.lower()
        greeting = "Hello"
        for marker, salutation in self._GREETINGS:
            if marker in descriptor:
                greeting = salutation
                break
        return f"{greeting}! You are at {self.context.location}."
if __name__ == "__main__":
    # Demo: the greeting tracks the context as it changes.
    system = ContextAwareSystem(Context(location="office", time_of_day="Morning"))
    print(system.respond())
    system.update_context(Context(location="home", time_of_day="Evening"))
    print(system.respond())

View File

@@ -0,0 +1,63 @@
from __future__ import annotations
from dataclasses import dataclass
from typing import Dict, Any, Callable, Optional
@dataclass
class Decision:
    """A set of options plus an optional recommended choice.

    Attributes:
        options: Mapping of option names to their underlying values.
        recommendation: Name of the currently recommended option, or None
            when no recommendation is available.
    """

    options: Dict[str, Any]
    recommendation: Optional[str] = None
class DecisionSupport:
    """Ranks options with a scoring function and recommends the best.

    The provided ``scorer`` maps each option's value to a numeric score;
    ``evaluate`` recommends the option whose value scores highest.
    """

    def __init__(self, scorer: Callable[[Any], float]) -> None:
        self.scorer = scorer

    def evaluate(self, options: Dict[str, Any]) -> Decision:
        """Score every option and recommend the highest-scoring one.

        Parameters
        ----------
        options : Dict[str, Any]
            A mapping from option names to their raw values.

        Returns
        -------
        Decision
            The original options plus the recommended key (None when
            *options* is empty).
        """
        if not options:
            return Decision(options, None)
        best = max(options, key=lambda name: self.scorer(options[name]))
        return Decision(options, best)
if __name__ == "__main__":
    # Demo: with identity scoring, the largest value wins.
    support = DecisionSupport(lambda value: value)
    choice = support.evaluate({"A": 0.5, "B": 0.8, "C": 0.3})
    print("Recommendation:", choice.recommendation)

View File

@@ -0,0 +1,55 @@
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class Emotion:
    """A simple two-dimensional emotional state.

    Attributes:
        valence: Emotional valence between -1 (negative) and 1 (positive).
        arousal: Emotional arousal level between 0 (calm) and 1 (excited).
    """

    valence: float
    arousal: float
class EmpathyEngine:
    """Tunes responses to the user's emotional state."""

    def respond(self, message: str, emotion: Emotion) -> str:
        """Prefix *message* with an empathetic lead-in based on valence.

        Parameters
        ----------
        message : str
            The core message to deliver.
        emotion : Emotion
            The user's emotional state.

        Returns
        -------
        str
            The message prefixed according to emotional valence.
        """
        # Branches are mutually exclusive; order does not affect the outcome.
        if emotion.valence > 0.3:
            lead_in = "That's great! "
        elif emotion.valence < -0.3:
            lead_in = "I'm sorry to hear that. "
        else:
            lead_in = "I see. "
        return lead_in + message
if __name__ == "__main__":
    # Demo: same engine, three emotional registers.
    engine = EmpathyEngine()
    print(engine.respond("How can I assist you?", Emotion(-0.6, 0.7)))
    print(engine.respond("Congratulations on your progress!", Emotion(0.8, 0.4)))
    print(engine.respond("Let's continue.", Emotion(0.0, 0.2)))

View File

@@ -0,0 +1,59 @@
from __future__ import annotations
from dataclasses import dataclass
from typing import List, Optional
@dataclass
class Feedback:
    """A piece of user feedback with an optional numeric rating.

    Attributes:
        user_id: Identifier of the user providing feedback.
        message: Textual content of the feedback.
        rating: Optional numeric rating (e.g., 1-5) for the feedback.
    """

    user_id: str
    message: str
    rating: Optional[int] = None
class FeedbackManager:
    """Collects user feedback and computes simple statistics over it."""

    def __init__(self) -> None:
        self._feedback: List[Feedback] = []

    def submit(self, feedback: Feedback) -> None:
        """Store a new feedback entry."""
        self._feedback.append(feedback)

    def average_rating(self) -> Optional[float]:
        """Mean of all numeric ratings, or None when none were given."""
        ratings = [entry.rating for entry in self._feedback if entry.rating is not None]
        if not ratings:
            return None
        return sum(ratings) / len(ratings)

    def messages(self) -> List[str]:
        """All feedback messages, in submission order."""
        return [entry.message for entry in self._feedback]
if __name__ == "__main__":
    # Demo: two rated entries and one unrated message.
    manager = FeedbackManager()
    manager.submit(Feedback(user_id="u1", message="Great job!", rating=5))
    manager.submit(Feedback(user_id="u2", message="Could be better.", rating=3))
    manager.submit(Feedback(user_id="u3", message="Loved the experience!"))
    print("Average rating:", manager.average_rating())
    print("Messages:", manager.messages())

View File

@@ -0,0 +1,54 @@
from __future__ import annotations
from dataclasses import dataclass, field
from typing import Dict, List
@dataclass
class InterfaceElement:
    """A UI element in a human-machine interface.

    Attributes:
        name: Identifier of the element.
        element_type: Type of element (e.g., "button", "slider").
        properties: Optional element-specific properties.
    """

    name: str
    element_type: str
    properties: Dict[str, str] = field(default_factory=dict)
class InterfaceDesigner:
    """Collects interface elements and renders a textual description."""

    def __init__(self) -> None:
        self.elements: List[InterfaceElement] = []

    def add_element(self, element: InterfaceElement) -> None:
        """Append *element* to the design."""
        self.elements.append(element)

    def render(self) -> str:
        """Produce a human-readable representation of the interface.

        Returns
        -------
        str
            A multiline string describing each element, properties included
            only when present.
        """
        descriptions = []
        for element in self.elements:
            label = f"{element.element_type.capitalize()} '{element.name}'"
            if element.properties:
                details = ", ".join(f"{key}={value}" for key, value in element.properties.items())
                label += f" ({details})"
            descriptions.append(label)
        return "\n".join(descriptions)
if __name__ == "__main__":
    # Demo: render a two-element interface description.
    builder = InterfaceDesigner()
    builder.add_element(InterfaceElement("Submit", "button", {"color": "blue"}))
    builder.add_element(InterfaceElement("Volume", "slider", {"min": "0", "max": "10"}))
    print(builder.render())

View File

@@ -0,0 +1,59 @@
from __future__ import annotations
from dataclasses import dataclass
from typing import Any, Callable, List
@dataclass
class LearningCycle:
    """One iteration of a learning loop.

    Attributes:
        iteration: Iteration number (starting from 1).
        state: State after the update function was applied.
        reward: Reward computed for this cycle.
    """

    iteration: int
    state: Any
    reward: float
class LearningLoop:
    """Runs an iterative learning loop with update and reward functions."""

    def __init__(self, update_fn: Callable[[Any], Any], reward_fn: Callable[[Any], float], max_iter: int = 5) -> None:
        self.update_fn = update_fn
        self.reward_fn = reward_fn
        self.max_iter = max_iter

    def run(self, initial_state: Any) -> List[LearningCycle]:
        """Iterate the loop ``max_iter`` times from *initial_state*.

        Parameters
        ----------
        initial_state : Any
            The starting state for the learning process.

        Returns
        -------
        List[LearningCycle]
            One entry per iteration, capturing state and reward at each step.
        """
        history: List[LearningCycle] = []
        state = initial_state
        for step in range(1, self.max_iter + 1):
            state = self.update_fn(state)
            history.append(LearningCycle(step, state, self.reward_fn(state)))
        return history
if __name__ == "__main__":
    # Demo: step toward 10; reward is negative distance from the target.
    demo_loop = LearningLoop(lambda x: x + 1, lambda x: -abs(10 - x), max_iter=3)
    for entry in demo_loop.run(0):
        print(entry)

Some files were not shown because too many files have changed in this diff Show More