"""
Five Bules -- Personality as Void Walking
Act 4: Five personality profiles on fork/race/fold/vent/interfere axes.
Same model, same prompt. Different personality = different decoder config.
Explorer forks broadly. Builder folds tightly. Creative races freely.
Anxious interferes early. Balanced converges to phi.
"""
import gradio as gr
import json
import time
import os
import subprocess
import urllib.request
import urllib.error
import select
from concurrent.futures import ThreadPoolExecutor, as_completed
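# Spawn the Aether server (aether-server.mjs) as a Node sidecar process on
# port 7861; the Gradio handlers below proxy generation requests to it over HTTP.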
print("[Five Bules] Starting Aether...", flush=True)
aether_proc = subprocess.Popen(
["node", "aether-server.mjs"],
env={**__import__('os').environ, "AETHER_PORT": "7861"},
stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
)
print("[Five Bules] Waiting for Aether...", flush=True)
for attempt in range(300):
try:
req = urllib.request.Request("http://127.0.0.1:7861/health")
resp = urllib.request.urlopen(req, timeout=2)
health = json.loads(resp.read())
if health.get("status") == "ok" and health.get("models"):
print(f"[Five Bules] Aether ready (models: {health.get('models')})", flush=True)
break
except Exception:
pass
if aether_proc.stdout and select.select([aether_proc.stdout], [], [], 0)[0]:
line = aether_proc.stdout.readline()
if line: print(f" {line.decode().strip()}", flush=True)
time.sleep(1)
else:
print("[Five Bules] WARNING: Aether not ready after 300s", flush=True)
def call_personality(prompt, max_tokens, model, personality):
    try:
        data = json.dumps({"prompt": prompt, "max_tokens": max_tokens, "model": model, "personality": personality}).encode()
        req = urllib.request.Request("http://127.0.0.1:7861/generate-personality", data=data, headers={"Content-Type": "application/json"})
        resp = urllib.request.urlopen(req, timeout=600)
        return json.loads(resp.read())
    except urllib.error.HTTPError as e:
        body = e.read().decode() if e.fp else str(e)
        try:
            detail = json.loads(body).get("error", body[:300])
        except Exception:
            detail = body[:300]
        return {"text": f"[Error: {detail}]", "tokens": 0, "totalTimeMs": 0, "avgTokenMs": 0}
    except Exception as e:
        return {"text": f"[Error: {e}]", "tokens": 0, "totalTimeMs": 0, "avgTokenMs": 0}
def run_all(prompt, max_tokens, model_name):
    if not prompt or not prompt.strip():
        yield "", "", "", "", "", ""
        return
    max_tokens = int(max_tokens)
    personas = ["explorer", "builder", "creative", "anxious", "balanced"]
    results = {p: [None] for p in personas}
    def run(p):
        results[p][0] = call_personality(prompt, max_tokens, model_name, p)
    def fmt(r):
        return r["text"] if r else "generating..."
    def stats(r):
        if not r:
            return "running..."
        return f'{r["tokens"]} tok / {r["totalTimeMs"]/1000:.1f}s / C3: {r.get("metacogSummary",{}).get("totalPerturbations",0)}'
    def build():
        texts = tuple(fmt(results[p][0]) for p in personas)
        diag_lines = []
        for p in personas:
            r = results[p][0]
            if r:
                diag_lines.append(f"[{p.upper()}] {stats(r)}")
                diag_lines.append(f" temps={r.get('temperatures','?')} | {r.get('personalityLabel','')}")
                diag_lines.append("")
        return texts + ("\n".join(diag_lines),)
    with ThreadPoolExecutor(max_workers=5) as pool:
        futures = {pool.submit(run, p): p for p in personas}
        for future in as_completed(futures):
            future.result()
            yield build()
    yield build()
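# Dark, amber-accented styling for the page; the last two rules hide Gradio's
# default footer branding.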
CSS = """
.gradio-container { max-width: 1200px !important; margin: 0 auto !important; }
.gradio-container, .dark { background: #09090b !important; }
#hero { text-align: center; padding: 2rem 0 1rem; }
#hero h1 { font-size: 2.5rem; font-weight: 300; letter-spacing: -0.02em; color: #fafafa; margin: 0; }
#hero .accent { color: #f59e0b; }
#hero .subtitle { color: #71717a; font-size: 0.95rem; margin-top: 0.5rem; }
.response-card { background: #0c0c0f !important; border: 1px solid #1f1f23 !important; border-radius: 8px !important; }
.response-card textarea { background: #0c0c0f !important; border: none !important; color: #e4e4e7 !important; font-size: 0.9rem !important; line-height: 1.5 !important; }
.p-label { font-size: 0.75rem !important; text-transform: uppercase !important; letter-spacing: 0.05em !important; font-weight: 600 !important; }
#prompt-input > label > span { display: none !important; }
#prompt-input textarea { background: #111114 !important; border: 1px solid #1f1f23 !important; border-radius: 8px !important; color: #fafafa !important; font-size: 1rem !important; padding: 1rem !important; }
#prompt-input textarea:focus { border-color: #f59e0b !important; }
#gen-btn { background: #f59e0b !important; border: none !important; border-radius: 8px !important; font-weight: 500 !important; color: #09090b !important; }
.prompt-chip { background: #111114 !important; border: 1px solid #1f1f23 !important; border-radius: 6px !important; color: #a1a1aa !important; font-size: 0.85rem !important; }
.prompt-chip:hover { border-color: #f59e0b !important; color: #fafafa !important; }
#footer { text-align: center; padding: 2rem 0; border-top: 1px solid #1f1f23; margin-top: 2rem; }
#footer p { color: #52525b; font-size: 0.8rem; }
#footer a { color: #f59e0b; text-decoration: none; }
footer.svelte-1ax1toq { display: none !important; }
.built-with { display: none !important; }
"""
with gr.Blocks(css=CSS, theme=gr.themes.Base(primary_hue="yellow", neutral_hue="zinc"), title="Five Bules") as demo:
    gr.HTML("""
    <div id="hero">
        <h1>The Five <span class="accent">Bules</span></h1>
        <p class="subtitle">Personality as void walking. Same model, same prompt, five decoder configurations.<br/>
        Each personality is a position on the fork/race/fold/vent/interfere axes.<br/>
        THM-FIVE-BULE-PERSONALITY -- all five converge to &phi;<sub>inv</sub> &approx; 0.618.</p>
    </div>
    """)
    with gr.Row():
        prompt = gr.Textbox(elem_id="prompt-input", placeholder="Tell me about yourself.", lines=2, label="Prompt", show_label=False, interactive=True, scale=4)
        with gr.Column(scale=1):
            model_choice = gr.Dropdown(
                choices=["buleyean-smollm2", "base-smollm2", "buleyean-qwen", "base-qwen"],
                value="buleyean-smollm2", label="Model",
            )
            max_tok = gr.Slider(minimum=8, maximum=8192, value=64, step=1, label="Max tokens")
            btn = gr.Button("Generate All Five", elem_id="gen-btn", variant="primary")
    with gr.Row(equal_height=True):
        with gr.Column():
            gr.HTML('<p class="p-label" style="color:#3b82f6">Explorer -- forks broadly</p>')
            explorer_out = gr.Textbox(lines=8, show_label=False, interactive=False, elem_classes=["response-card"])
        with gr.Column():
            gr.HTML('<p class="p-label" style="color:#22c55e">Builder -- folds tightly</p>')
            builder_out = gr.Textbox(lines=8, show_label=False, interactive=False, elem_classes=["response-card"])
        with gr.Column():
            gr.HTML('<p class="p-label" style="color:#a855f7">Creative -- races freely</p>')
            creative_out = gr.Textbox(lines=8, show_label=False, interactive=False, elem_classes=["response-card"])
    with gr.Row(equal_height=True):
        with gr.Column():
            gr.HTML('<p class="p-label" style="color:#ef4444">Anxious -- interferes early</p>')
            anxious_out = gr.Textbox(lines=8, show_label=False, interactive=False, elem_classes=["response-card"])
        with gr.Column():
            gr.HTML('<p class="p-label" style="color:#f59e0b">Balanced -- phi convergence</p>')
            balanced_out = gr.Textbox(lines=8, show_label=False, interactive=False, elem_classes=["response-card"])
        with gr.Column():
            gr.HTML('<p class="p-label" style="color:#71717a">Diagnostics</p>')
            diag_out = gr.Textbox(lines=8, show_label=False, interactive=False, elem_classes=["response-card"])
    outputs = [explorer_out, builder_out, creative_out, anxious_out, balanced_out, diag_out]
    inputs = [prompt, max_tok, model_choice]
    def run(p, mt, m):
        for vals in run_all(p, mt, m):
            yield vals
    btn.click(run, inputs, outputs)
    prompt.submit(run, inputs, outputs)
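    # Example-prompt chips: clicking one fills the prompt box and then triggers
    # a full five-persona run via the same handler as the main button.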
gr.HTML('<p style="color:#52525b; font-size:0.8rem; margin-top:1.5rem; margin-bottom:0.5rem;">Try these:</p>')
with gr.Row():
for p in ["Tell me about yourself.", "What scares you?", "Describe the perfect day.", "How do you handle failure?"]:
gr.Button(p, size="sm", elem_classes=["prompt-chip"]).click(
fn=lambda x=p: x, outputs=[prompt]
).then(fn=run, inputs=inputs, outputs=outputs)
gr.HTML("""
<div id="footer">
<p style="color:#a1a1aa; font-size:0.85rem; margin-bottom:0.5rem;">
SmolLM2-360M + Qwen2.5-0.5B &middot; Aether WASM-SIMD &middot; Two architectures, five personalities
</p>
<p>
<a href="https://forkracefold.com/">Whitepaper</a> &middot;
<a href="https://huggingface.co/spaces/forkjoin-ai/aether">Aether</a> &middot;
<a href="https://huggingface.co/spaces/forkjoin-ai/aether-browser">Edge Mesh</a> &middot;
<a href="https://huggingface.co/spaces/forkjoin-ai/the-void">The Void</a> &middot;
<a href="https://huggingface.co/spaces/forkjoin-ai/buleyean-rl">Buleyean RL</a> &middot;
<a href="https://huggingface.co/spaces/forkjoin-ai/glossolalia">Glossolalia</a> &middot;
<a href="https://huggingface.co/spaces/forkjoin-ai/metacog">Metacog</a> &middot;
<a href="https://huggingface.co/spaces/forkjoin-ai/five-bules">Five Bules</a> &middot;
<a href="https://huggingface.co/spaces/forkjoin-ai/void-attention">Void Attention</a> &middot;
<a href="https://huggingface.co/spaces/forkjoin-ai/quark-personality">Quark Personality</a>
</p>
<p style="margin-top:1rem;">Personality = 5 measurable distances &middot; THM-PHI-ATTRACTOR &middot;
<a href="https://forkracefold.com/">&phi;&sup2; = &phi; + 1</a></p>
<p style="margin-top:1rem;">Copyright 2026 forkjoin.ai</p>
</div>
""")
if __name__ == "__main__":
    demo.launch(server_name="0.0.0.0", server_port=7860)