mirror of
https://github.com/blackboxprogramming/BlackRoad-Operating-System.git
synced 2026-03-18 06:34:00 -05:00
feat: Add Research Lab pack with parallel math modules
Create comprehensive research-lab pack structure with mathematical and quantum computing modules from blackroad-prism-console: Math Modules: - hilbert_core.py: Hilbert space symbolic reasoning - collatz/: Distributed Collatz conjecture verification - linmath/: Linear mathematics C library - lucidia_math_forge/: Symbolic proof engine - lucidia_math_lab/: Experimental mathematics Quantum Modules: - lucidia_quantum/: Quantum core - quantum_engine/: Circuit simulation Experiments: - br_math/: Gödel gap, quantum experiments Includes pack.yaml manifest and comprehensive README. 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude <noreply@anthropic.com>
This commit is contained in:
9
packs/research-lab/math/collatz/Dockerfile
Normal file
9
packs/research-lab/math/collatz/Dockerfile
Normal file
@@ -0,0 +1,9 @@
|
||||
# Multi-arch Python base; works on x86_64, arm64, aarch64 (Jetson/RPi)
FROM python:3.11-slim

WORKDIR /app

# Install dependencies in their own layer so code edits don't bust the cache.
COPY requirements.txt ./
RUN pip install --no-cache-dir -r requirements.txt

COPY collatz ./collatz
COPY scripts ./scripts
# NOTE(review): config.yaml is already included by `COPY collatz ./collatz`
# above — this re-copy looks redundant unless a .dockerignore excludes it;
# confirm before removing.
COPY collatz/config.yaml ./collatz/config.yaml

# Default entry: local orchestrator + workers (see scripts/run_local.sh).
CMD ["bash", "scripts/run_local.sh"]
|
||||
42
packs/research-lab/math/collatz/README.md
Normal file
42
packs/research-lab/math/collatz/README.md
Normal file
@@ -0,0 +1,42 @@
|
||||
# Collatz Campaign (LLM-assisted)
|
||||
|
||||
Goal: search for Collatz counterexamples or extend verified bounds.
|
||||
|
||||
- Deterministic chunking; resumable via SQLite.
|
||||
- Each chunk is verified by a second pass (different arithmetic schedule).
|
||||
- Anomalies emit full "witness" traces for human audit.
|
||||
|
||||
## Quickstart
|
||||
|
||||
```bash
|
||||
python3 -m venv .venv && source .venv/bin/activate
|
||||
pip install -r requirements.txt
|
||||
python -m collatz.orchestrator --start 1 --end 100000000 --chunk 100000 --workers 4
|
||||
```
|
||||
|
||||
## Run on multiple machines (Jetson/RPi/PC)
|
||||
|
||||
```bash
|
||||
# On each device, point to the same repo folder (or sync via git pulls),
|
||||
# then run worker(s) pulling chunks from the same SQLite DB file:
|
||||
python -m collatz.worker --db ./campaign.sqlite --workers 4
|
||||
```
|
||||
|
||||
Or just:
|
||||
|
||||
```bash
|
||||
bash scripts/run_local.sh
|
||||
```
|
||||
|
||||
## Outputs
|
||||
|
||||
- `campaign.sqlite`: jobs, results, anomalies, and checkpoints.
|
||||
- `artifacts/`: CSV summaries, anomaly traces (repro inputs + partial trajectories).
|
||||
- `RESULTS.md`: rolling human-readable results.
|
||||
|
||||
## What counts as "progress"?
|
||||
|
||||
1. No counterexample up to N (monotone increase of checked bound).
|
||||
2. New records: largest stopping time / maximum excursion discovered with full witnesses.
|
||||
|
||||
_Last updated on 2025-09-11_
|
||||
6
packs/research-lab/math/collatz/config.yaml
Normal file
6
packs/research-lab/math/collatz/config.yaml
Normal file
@@ -0,0 +1,6 @@
|
||||
# Collatz campaign defaults; CLI flags (e.g. --db, --chunk) take precedence.
db_path: './campaign.sqlite'   # shared SQLite queue/results store (WAL mode)
artifact_dir: './artifacts'    # anomaly traces and CSV summaries land here
chunk_size: 100000             # integers per enqueued job chunk
verify_pass: true              # re-check each n with the uncompressed map
max_trace_steps: 1000000       # NOTE(review): worker.py currently hardcodes its step caps — confirm this key is actually read
report_every_seconds: 10       # orchestrator status refresh interval (approx)
|
||||
115
packs/research-lab/math/collatz/db.py
Normal file
115
packs/research-lab/math/collatz/db.py
Normal file
@@ -0,0 +1,115 @@
|
||||
import os
|
||||
import sqlite3
|
||||
import time
|
||||
from typing import Optional, Tuple
|
||||
|
||||
# Idempotent DDL applied on every connect(); statements carry no embedded
# semicolons so they can be executed one at a time.
SCHEMA = """
CREATE TABLE IF NOT EXISTS jobs(
id INTEGER PRIMARY KEY AUTOINCREMENT,
start_n INTEGER NOT NULL,
end_n INTEGER NOT NULL,
status TEXT NOT NULL DEFAULT 'queued', -- queued|running|done|error
claimed_at REAL,
finished_at REAL
);
CREATE TABLE IF NOT EXISTS results(
id INTEGER PRIMARY KEY AUTOINCREMENT,
job_id INTEGER NOT NULL,
min_n INTEGER NOT NULL,
max_n INTEGER NOT NULL,
max_stopping_time INTEGER,
max_excursion INTEGER,
checked_count INTEGER,
verified INTEGER NOT NULL, -- 0/1
FOREIGN KEY(job_id) REFERENCES jobs(id)
);
CREATE TABLE IF NOT EXISTS anomalies(
id INTEGER PRIMARY KEY AUTOINCREMENT,
n0 INTEGER NOT NULL,
reason TEXT NOT NULL,
job_id INTEGER,
trace_path TEXT,
created_at REAL
);
CREATE INDEX IF NOT EXISTS idx_jobs_status ON jobs(status);
"""


def connect(path: str):
    """Open (creating if necessary) the campaign database at *path*.

    Ensures the parent directory exists, enables WAL journaling so many
    worker processes can share the file, applies the schema idempotently,
    and returns an autocommit (``isolation_level=None``) connection.
    """
    parent = os.path.dirname(path) or "."
    os.makedirs(parent, exist_ok=True)
    conn = sqlite3.connect(path, timeout=60, isolation_level=None)
    # WAL lets readers proceed while one writer holds the lock.
    conn.execute("PRAGMA journal_mode=WAL;")
    statements = [part for part in SCHEMA.strip().split(";") if part.strip()]
    for statement in statements:
        conn.execute(statement)
    return conn
|
||||
|
||||
|
||||
def enqueue_chunks(conn, start_n: int, end_n: int, chunk: int):
    """Split the inclusive range [start_n, end_n] into chunk-sized jobs.

    Each job is inserted with the default 'queued' status; the final chunk
    is clipped to end_n so no job extends past the requested range.
    """
    cur = conn.cursor()
    lo = start_n
    while lo <= end_n:
        hi = min(lo + chunk - 1, end_n)
        cur.execute("INSERT INTO jobs(start_n,end_n) VALUES(?,?)", (lo, hi))
        lo += chunk
|
||||
|
||||
|
||||
def claim_job(conn) -> Optional[Tuple[int, int, int]]:
    """Atomically claim the oldest queued job.

    Takes the write lock up front with BEGIN IMMEDIATE so two workers on
    the same database cannot claim the same row, marks the job 'running',
    and returns ``(job_id, start_n, end_n)`` — or ``None`` when the queue
    is empty.
    """
    cur = conn.cursor()
    cur.execute("BEGIN IMMEDIATE;")
    try:
        row = cur.execute(
            "SELECT id,start_n,end_n FROM jobs WHERE status='queued' ORDER BY id LIMIT 1"
        ).fetchone()
        if not row:
            conn.execute("COMMIT;")
            return None
        job_id, s, e = row
        conn.execute(
            "UPDATE jobs SET status='running', claimed_at=? WHERE id=?",
            (time.time(), job_id),
        )
        conn.execute("COMMIT;")
        return job_id, s, e
    except Exception:
        # fix: the original left the transaction (and its write lock) open
        # on any error, stalling every other worker until timeout.
        conn.execute("ROLLBACK;")
        raise
|
||||
|
||||
|
||||
def finish_job(
    conn,
    job_id: int,
    verified: int,
    min_n: int,
    max_n: int,
    max_stopping_time: int,
    max_excursion: int,
    checked: int,
):
    """Persist a chunk's aggregate result row, then mark its job 'done'."""
    result_row = (
        job_id,
        min_n,
        max_n,
        max_stopping_time,
        max_excursion,
        checked,
        verified,
    )
    conn.execute(
        "INSERT INTO results(job_id,min_n,max_n,max_stopping_time,max_excursion,checked_count,verified) VALUES(?,?,?,?,?,?,?)",
        result_row,
    )
    completion = (time.time(), job_id)
    conn.execute("UPDATE jobs SET status='done', finished_at=? WHERE id=?", completion)
|
||||
|
||||
|
||||
def record_anomaly(
    conn,
    n0: int,
    reason: str,
    job_id: Optional[int],
    trace_path: Optional[str],
):
    """Log a suspicious value *n0* for human audit.

    *reason* is a short tag (e.g. 'no_convergence_cap'); *trace_path*
    optionally points at a dumped witness trajectory on disk.
    """
    anomaly_row = (n0, reason, job_id, trace_path, time.time())
    conn.execute(
        "INSERT INTO anomalies(n0,reason,job_id,trace_path,created_at) VALUES(?,?,?,?,?)",
        anomaly_row,
    )
|
||||
|
||||
|
||||
def status(conn):
    """Return the (queued, running, done) job counts for the campaign DB."""
    cur = conn.cursor()
    # fix: use the standard COUNT(*) spelling consistently — the 'done' query
    # already did; zero-argument COUNT() is a non-portable SQLite quirk.
    queued = cur.execute("SELECT COUNT(*) FROM jobs WHERE status='queued'").fetchone()[0]
    running = cur.execute("SELECT COUNT(*) FROM jobs WHERE status='running'").fetchone()[0]
    done = cur.execute("SELECT COUNT(*) FROM jobs WHERE status='done'").fetchone()[0]
    return queued, running, done
|
||||
38
packs/research-lab/math/collatz/orchestrator.py
Normal file
38
packs/research-lab/math/collatz/orchestrator.py
Normal file
@@ -0,0 +1,38 @@
|
||||
import argparse
|
||||
import time
|
||||
|
||||
import yaml
|
||||
|
||||
from .db import connect, enqueue_chunks, status
|
||||
|
||||
|
||||
def main():
    """CLI entry: enqueue [start, end] in chunks and poll until all jobs finish.

    The orchestrator only fills the shared SQLite queue and reports progress;
    actual computation is done by `collatz.worker` processes pointed at the
    same database file.
    """
    ap = argparse.ArgumentParser()
    ap.add_argument("--start", type=int, required=True)
    ap.add_argument("--end", type=int, required=True)
    ap.add_argument("--chunk", type=int, default=None)
    ap.add_argument("--db", default="./campaign.sqlite")
    ap.add_argument("--cfg", default="./collatz/config.yaml")
    ap.add_argument("--workers", type=int, default=0, help="optional hint for humans/logs only")
    args = ap.parse_args()

    # fix: close the config file promptly instead of leaking the handle
    with open(args.cfg) as fh:
        cfg = yaml.safe_load(fh)
    db = connect(args.db)
    try:
        # CLI --chunk wins; fall back to the config's chunk_size.
        chunk = args.chunk or int(cfg["chunk_size"])

        enqueue_chunks(db, args.start, args.end, chunk)
        print(f"Enqueued [{args.start}, {args.end}] in chunks of {chunk}.")
        print("Run workers on each device: `python -m collatz.worker --db ./campaign.sqlite`")
        print("Status will refresh every ~10s.\n")

        while True:
            q, r, d = status(db)
            print(f"[{time.strftime('%Y-%m-%d %H:%M:%S')}] queued={q} running={r} done={d}")
            if q == 0 and r == 0:
                print("All jobs complete.")
                break
            time.sleep(10)
    finally:
        db.close()  # fix: original never closed the orchestrator's connection
|
||||
|
||||
|
||||
# Script entry point (run as `python -m collatz.orchestrator`).
if __name__ == "__main__":
    main()
|
||||
23
packs/research-lab/math/collatz/verifier.py
Normal file
23
packs/research-lab/math/collatz/verifier.py
Normal file
@@ -0,0 +1,23 @@
|
||||
import argparse
|
||||
|
||||
from .db import connect
|
||||
|
||||
|
||||
def main():
    """CLI entry: print campaign totals and the anomaly count awaiting audit."""
    ap = argparse.ArgumentParser()
    ap.add_argument("--db", default="./campaign.sqlite")
    args = ap.parse_args()
    db = connect(args.db)
    try:
        cur = db.cursor()
        # fix: standard COUNT(*) spelling (consistent with db.status);
        # zero-argument COUNT() is a non-portable SQLite quirk.
        jobs, checked, max_stop, max_exc = cur.execute(
            "SELECT COUNT(*), SUM(checked_count), MAX(max_stopping_time), MAX(max_excursion) FROM results"
        ).fetchone()
        # SUM/MAX are NULL on an empty table; report zeros instead.
        print(
            f"Jobs: {jobs} Integers checked: {checked or 0} Record stopping time: {max_stop or 0} Record excursion: {max_exc or 0}"
        )
        anomalies = cur.execute("SELECT COUNT(*) FROM anomalies").fetchone()[0]
        print(f"Anomalies (need audit): {anomalies}")
    finally:
        db.close()  # fix: original never closed the connection
|
||||
|
||||
|
||||
# Script entry point (run as `python -m collatz.verifier`).
if __name__ == "__main__":
    main()
|
||||
108
packs/research-lab/math/collatz/worker.py
Normal file
108
packs/research-lab/math/collatz/worker.py
Normal file
@@ -0,0 +1,108 @@
|
||||
import argparse
|
||||
import os
|
||||
from typing import Tuple
|
||||
|
||||
import psutil
|
||||
import yaml
|
||||
|
||||
from .db import claim_job, connect, finish_job, record_anomaly
|
||||
|
||||
|
||||
# Fast integer Collatz step with power-of-two compression
|
||||
# Fast integer Collatz step with power-of-two compression
def collatz_step(n: int) -> int:
    """Return the next value in the accelerated Collatz orbit of ``n``.

    Even ``n`` is halved once; odd ``n`` is mapped to 3n + 1 and then every
    trailing factor of two is divided out in the same step.
    """
    if n & 1 == 0:
        return n >> 1
    odd_image = 3 * n + 1
    # divide out the full power of two (equivalent to the lowbit trick)
    while odd_image & 1 == 0:
        odd_image >>= 1
    return odd_image


def stopping_time_and_excursion(n0: int, max_steps: int = 10_000_000) -> Tuple[int, int]:
    """Iterate the compressed map from ``n0`` until it reaches 1.

    Returns ``(steps, peak)``: the number of compressed steps taken and the
    largest value observed along the way. ``steps`` is -1 when the orbit
    fails to reach 1 within ``max_steps`` — an anomaly for the caller to
    record, not proof of divergence.
    """
    current = n0
    peak = n0
    taken = 0
    while current != 1 and taken < max_steps:
        current = collatz_step(current)
        peak = max(peak, current)
        taken += 1
    return (taken, peak) if current == 1 else (-1, peak)
|
||||
|
||||
|
||||
def verify_second_pass(n0: int) -> bool:
    """Independently re-check that ``n0`` reaches 1 under the plain map.

    Uses the classic one-step-at-a-time schedule (no power-of-two
    compression), capped at 20M steps, so a bug in the fast path cannot
    hide in both passes.
    """
    value = n0
    for _ in range(20_000_000):
        if value == 1:
            return True
        value = value // 2 if value % 2 == 0 else 3 * value + 1
    return value == 1
|
||||
|
||||
|
||||
def run_job(db_path: str, artifact_dir: str, job_id: int, s: int, e: int, verify: bool):
    """Process one claimed chunk [s, e] and persist its aggregate result.

    For each n0: compute stopping time / excursion with the compressed map;
    non-convergence within the cap dumps a witness trace and records an
    anomaly; otherwise the running records are updated and (optionally) a
    second uncompressed pass cross-checks convergence.
    """
    os.makedirs(artifact_dir, exist_ok=True)
    conn = connect(db_path)
    try:
        checked = 0
        max_stop = 0
        max_exc = 0
        for n0 in range(s, e + 1):
            st, exc = stopping_time_and_excursion(n0)
            if st < 0:
                # anomaly: didn't converge within the step cap — dump a partial
                # trajectory (plain map) so a human can audit/reproduce it.
                trace_path = os.path.join(artifact_dir, f"anomaly_trace_{n0}.txt")
                with open(trace_path, "w") as f:
                    n = n0
                    # NOTE(review): this 1M cap duplicates cfg max_trace_steps —
                    # confirm whether the config value should be threaded here.
                    for _ in range(1_000_000):
                        f.write(str(n) + "\n")
                        if n == 1:
                            break
                        n = 3 * n + 1 if n & 1 else n // 2
                record_anomaly(conn, n0, "no_convergence_cap", job_id, trace_path)
            else:
                if st > max_stop:
                    max_stop = st
                if exc > max_exc:
                    max_exc = exc
                if verify and not verify_second_pass(n0):
                    record_anomaly(conn, n0, "verify_mismatch", job_id, None)
                # anomalous n0 are deliberately excluded from 'checked'
                checked += 1
        finish_job(conn, job_id, 1 if verify else 0, s, e, max_stop, max_exc, checked)
    finally:
        conn.close()  # fix: original leaked one SQLite connection per job
|
||||
|
||||
|
||||
def worker_loop(db_path: str, artifact_dir: str, verify: bool):
    """Claim and run jobs from the shared queue until it is empty."""
    conn = connect(db_path)
    try:
        while (slot := claim_job(conn)) is not None:
            job_id, s, e = slot
            run_job(db_path, artifact_dir, job_id, s, e, verify)
    finally:
        conn.close()  # fix: claiming connection was never closed
|
||||
|
||||
|
||||
def main():
    """CLI entry: spawn a pool of worker processes draining the shared queue.

    Each pool process opens its own SQLite connection (required: connections
    must not cross process boundaries) and exits when the queue is empty.
    """
    ap = argparse.ArgumentParser()
    ap.add_argument("--db", default="./campaign.sqlite")
    ap.add_argument("--cfg", default="./collatz/config.yaml")
    ap.add_argument("--workers", type=int, default=max(1, psutil.cpu_count(logical=False) or 1))
    args = ap.parse_args()
    # fix: close the config file promptly instead of leaking the handle
    with open(args.cfg) as fh:
        cfg = yaml.safe_load(fh)
    artifact_dir = cfg["artifact_dir"]
    verify = bool(cfg.get("verify_pass", True))

    # Simple local worker loop
    import multiprocessing as mp

    with mp.Pool(processes=args.workers) as pool:
        pool.starmap(worker_loop, [(args.db, artifact_dir, verify)] * args.workers)
|
||||
|
||||
|
||||
# Script entry point (run as `python -m collatz.worker`).
if __name__ == "__main__":
    main()
|
||||
Reference in New Issue
Block a user