# (Hugging Face Spaces status banner — "Spaces: Running" — captured with the
#  source paste; not part of the program.)
| """ | |
| Mnemo v7.0 Gradio Demo Space | |
| ============================= | |
| Interactive demo for the Mnemo memory engine. | |
| Imports the shared PersistentMnemo from mnemo_core.py. | |
| v7.0: Updated all UI + API functions to use public API methods only. | |
| No direct access to engine internals (dicts, locks, EntityIndex). | |
| Backend now uses SQLite + FTS5 + FAISS + NumPy hybrid architecture. | |
| """ | |
| import json | |
| import gradio as gr | |
| from mnemo_core import PersistentMnemo | |
# Global instance now fully synced to your Hugging Face Dataset!
# Shared engine wrapper used by every UI handler and API endpoint below.
mnemo = PersistentMnemo()

# Seed data for the "Load Examples" button (see initialize_demo).
EXAMPLE_MEMORIES = [
    "During Book 3, Sebastian is kept in captivity under sedation and psychological manipulation, including drug protocols and tattooing.",
    "Alistair Fitzroy offered Sebastian a choice between institutionalization or rest and treatment at Blackwood Estate, leading to his captivity.",
    "Isabella Blackwood is fixated on Sebastian, whom she believes can help her transcend humanity.",
    "Isabella uses rituals, psychological conditioning, and symbolic care to entrap Sebastian.",
    "Alistair Fitzroy oversees Sebastian's drug regimen in captivity.",
    "Evelyn Whitmore withdraws from medical school after publicly confronting Isabella to rescue Sebastian.",
]
| # ============================================================================ | |
| # LEGACY BLOB MEMORY FUNCTIONS | |
| # ============================================================================ | |
def initialize_demo():
    """Reset the store and seed it with the bundled example memories."""
    mnemo.clear()
    for example in EXAMPLE_MEMORIES:
        mnemo.add(example)
    link_count = mnemo.get_stats().get("links_created", 0)
    summary = f"Initialized with {len(EXAMPLE_MEMORIES)} memories, {link_count} links created"
    return summary, get_all_memories()
def add_memory(content, priority):
    """Add one blob memory; return (status message, refreshed listing)."""
    text = content.strip()
    if not text:
        return "Please enter content", get_all_memories()
    new_id = mnemo.add(text, priority=priority)
    if new_id:
        return f"Added: {new_id}", get_all_memories()
    return "Rejected (low quality - try longer or more detailed content)", get_all_memories()
def search_memories(query, top_k=15):
    """Run the legacy blob search and render the hits as markdown."""
    q = query.strip()
    if not q:
        return "Please enter a query"
    hits = mnemo.search(q, top_k=int(top_k))
    if not hits:
        return "No memories found above threshold"
    lines = []
    for hit in hits:
        lines.append(f"**[{hit.tier.upper()}]** score={round(hit.score, 3)}")
        lines.append(hit.content)
        lines.append(f"_Semantic: {round(hit.semantic_score, 2)}, Links: {round(hit.link_score, 2)}_")
        lines.append("---")  # horizontal rule between hits
    return "\n".join(lines)
def check_injection(query, context=""):
    """Ask the predictor whether injecting memory would help this query."""
    should, reason, confidence = mnemo.should_inject(query, context)
    # Both branches differ only in the verdict word.
    verdict = "INJECT MEMORY" if should else "SKIP MEMORY"
    status = "**" + verdict + "** (confidence: " + str(round(confidence, 2)) + ")"
    return status + "\n\nReason: `" + reason + "`"
def get_context_for_injection(query, top_k=15):
    """Fetch injection context for a query, fenced as a markdown code block."""
    q = query.strip()
    if not q:
        return "Please enter a query"
    context = mnemo.get_context(q, top_k=int(top_k))
    if not context:
        return "_No relevant context found_"
    return "```\n" + context + "\n```"
def delete_memory_by_id(memory_id):
    """Delete one blob memory by id; return (status, refreshed listing)."""
    target = memory_id.strip()
    if not target:
        return "Please enter a memory ID", get_all_memories()
    # Status messages echo the raw input, matching the original behavior.
    if mnemo.delete(target):
        return "Deleted: " + memory_id, get_all_memories()
    return "Not found: " + memory_id, get_all_memories()
def run_maintenance():
    """Run decay/prune/tier maintenance and summarize the counters.

    Returns a one-line summary string. Counters are read with
    ``dict.get(..., 0)`` so a backend that omits one of the expected keys
    cannot crash the UI with a KeyError (the original indexed directly).
    """
    result = mnemo.maintenance()
    # (result key, human-readable label) in display order.
    counters = [
        ("decayed", "decayed"),
        ("pruned", "pruned"),
        ("links_decayed", "links decayed"),
        ("links_pruned", "links pruned"),
        ("tier_promotions", "promoted"),
        ("tier_demotions", "demoted"),
    ]
    parts = [str(result.get(key, 0)) + " " + label for key, label in counters]
    return "Maintenance: " + ", ".join(parts)
def get_all_memories():
    """Render every stored blob memory as a markdown bullet list."""
    if len(mnemo) == 0:
        return "_No memories stored_"
    rows = []
    for mem in mnemo.list_all():
        content = mem["content"]
        # 80-char preview with ellipsis when truncated.
        preview = content[:80] + ("..." if len(content) > 80 else "")
        rows.append(
            "* **" + mem["id"] + "** [" + mem["tier"] + "] "
            + "(q=" + str(mem["quality_score"]) + ", p=" + str(mem["priority"]) + "): "
            + preview
        )
    return "\n".join(rows)
def get_stats():
    """Format engine statistics (flat values plus nested dicts) as markdown."""
    lines = ["### System Statistics\n"]
    for key, value in mnemo.get_stats().items():
        if isinstance(value, dict):
            # Nested stat group: heading plus one sub-line per entry.
            lines.append("**" + key + ":**")
            lines.extend(" - " + str(sub_k) + ": " + str(sub_v)
                         for sub_k, sub_v in value.items())
        else:
            lines.append("* **" + key + "**: " + str(value))
    return "\n".join(lines)
def clear_memories():
    """Wipe the whole store; return (status, empty-listing placeholder)."""
    mnemo.clear()
    return "All memories cleared", "_No memories stored_"
| # ============================================================================ | |
| # v6.0: CONNECTION POINT FUNCTIONS | |
| # ============================================================================ | |
def add_connection_point(entity, point_type, value, connects_to, reason, weight, category, source):
    """Create one structured connection point; return (status, listing)."""
    if not entity.strip():
        return "Please enter an entity name", get_all_points()
    cp_id = mnemo.add_point(
        entity=entity.strip(),
        point_type=point_type.strip(),
        value=value.strip(),
        connects_to=connects_to.strip(),
        reason=reason.strip(),
        weight=weight,
        category=category.lower(),
        source=source,
    )
    if cp_id:
        return "Added: " + cp_id, get_all_points()
    return "Failed to add point", get_all_points()
def graph_search(query, top_k=15):
    """Graph-first search over connection points, rendered as markdown."""
    q = query.strip()
    if not q:
        return "Please enter a query"
    hits = mnemo.graph_search(q, top_k=int(top_k))
    if not hits:
        return "No connection points found"
    lines = []
    for hit in hits:
        # Header: category plus the combined / graph / semantic scores.
        lines.append(
            "**[" + hit.get("category", "?").upper() + "]** "
            + "score=" + str(hit.get("score", 0))
            + " (graph=" + str(hit.get("graph_score", 0))
            + ", sem=" + str(hit.get("semantic_score", 0)) + ")"
        )
        # Detail: entity | type [→ target] [| value]
        detail = hit.get("entity", "") + " | " + hit.get("point_type", "")
        if hit.get("connects_to"):
            detail += " → " + hit["connects_to"]
        if hit.get("value"):
            detail += " | " + hit["value"]
        lines.append(detail)
        if hit.get("reason"):
            lines.append("_reason: " + hit["reason"] + "_")
        lines.append("---")
    return "\n".join(lines)
def entity_lookup(entity):
    """List every connection point attached to a single entity."""
    name = entity.strip()
    if not name:
        return "Please enter an entity name"
    points = mnemo.entity_lookup(name)
    if not points:
        # Echo the raw input, matching the original behavior.
        return "No points found for: " + entity
    lines = ["### " + entity + " (" + str(len(points)) + " points)\n"]
    for cp in points:
        row = "* **" + cp.get("point_type", "") + "**"
        if cp.get("connects_to"):
            row += " → " + cp["connects_to"]
        row += ": " + cp.get("value", "")
        if cp.get("reason"):
            row += " _(" + cp["reason"] + ")_"
        row += " [w=" + str(round(cp.get("weight", 0.5), 2)) + "]"
        lines.append(row)
    return "\n".join(lines)
def get_all_points(limit=50):
    """Render up to ``limit`` connection points as a markdown list.

    Generalization: the cap was previously hard-coded as 50 in three
    places (the list_points call, the overflow comparison, and the footer
    text). It is now a single keyword parameter; existing callers pass no
    argument, so the default output is byte-identical to before.
    """
    points = mnemo.list_points(limit=limit)
    if not points:
        return "_No connection points stored_"
    lines = []
    for cp in points:
        value = cp.get("value", "")
        row = "* **" + cp.get("entity", "") + "**." + cp.get("point_type", "")
        if cp.get("connects_to"):
            row += "→" + cp["connects_to"]
        # 60-char value preview with ellipsis when truncated.
        row += ": " + value[:60]
        if len(value) > 60:
            row += "..."
        row += " [" + cp.get("category", "?") + ", w=" + str(round(cp.get("weight", 0.5), 2)) + "]"
        lines.append(row)
    total = mnemo.get_stats().get("total_connection_points", len(points))
    if total > limit:
        lines.append("\n_...showing " + str(limit) + " of " + str(total) + " points_")
    return "\n".join(lines)
def delete_point_by_id(cp_id):
    """Delete one connection point by id; return (status, listing)."""
    target = cp_id.strip()
    if not target:
        return "Please enter a CP ID", get_all_points()
    if mnemo.delete_point(target):
        return "Deleted: " + cp_id, get_all_points()
    return "Not found: " + cp_id, get_all_points()
| # ============================================================================ | |
| # v6.0: THREAD & KNOT FUNCTIONS | |
| # ============================================================================ | |
def create_thread(thread_id, name, entity, thread_type, point_ids_text):
    """Create a narrative thread, optionally pre-populated with point ids."""
    if not thread_id.strip() or not name.strip():
        return "Thread ID and name are required", get_all_threads()
    # Comma-separated CP ids; blank entries are dropped.
    point_ids = []
    if point_ids_text.strip():
        point_ids = [p.strip() for p in point_ids_text.split(",") if p.strip()]
    tid = mnemo.add_thread(
        thread_id=thread_id.strip(),
        name=name.strip(),
        entity=entity.strip(),
        thread_type=thread_type,
        point_ids=point_ids or None,
    )
    return "Thread created: " + tid, get_all_threads()
def trace_thread_fn(thread_id, direction, steps, from_position):
    """Walk a thread back/forward from a position and render the points."""
    tid = thread_id.strip()
    if not tid:
        return "Please enter a thread ID"
    points = mnemo.trace_thread(tid, int(from_position), direction, int(steps))
    if not points:
        return "No points found (thread empty or at boundary)"
    lines = ["### Trace " + direction + " on " + thread_id + "\n"]
    for point in points:
        row = "* [" + point.get("category", "?").upper() + "] "
        row += point.get("entity", "") + " | " + point.get("point_type", "")
        if point.get("value"):
            row += ": " + point["value"][:80]
        lines.append(row)
    return "\n".join(lines)
def create_knot(knot_id, name, thread_ids_text, pivot_type, reason):
    """Create a knot binding several threads at a pivot moment."""
    if not knot_id.strip() or not name.strip():
        return "Knot ID and name required", get_all_knots()
    thread_ids = [t.strip() for t in thread_ids_text.split(",") if t.strip()]
    if not thread_ids:
        return "At least one thread ID required", get_all_knots()
    kid = mnemo.add_knot(
        knot_id=knot_id.strip(),
        name=name.strip(),
        thread_ids=thread_ids,
        pivot_type=pivot_type,
        reason=reason.strip(),
    )
    return "Knot created: " + kid, get_all_knots()
def get_knot_detail(knot_id):
    """Dump the full context of one knot as pretty-printed JSON."""
    kid = knot_id.strip()
    if not kid:
        return "Please enter a knot ID"
    ctx = mnemo.get_knot_context(kid)
    if ctx:
        return "```json\n" + json.dumps(ctx, indent=2) + "\n```"
    return "Knot not found: " + knot_id
def get_all_threads():
    """Render every active thread as a one-line markdown summary."""
    threads = mnemo.get_active_threads()
    if not threads:
        return "_No threads_"
    rows = []
    for t in threads:
        # Pipe-joined segments: header | entity | points | pos | tension [| tone]
        segments = [
            "* **" + t["id"] + "** [" + t.get("status", "active") + "] "
            + t["name"] + " (" + t.get("thread_type", "plot_line") + ")",
            "entity: " + t.get("entity", ""),
            "points: " + str(len(t.get("points", []))),
            "pos: " + str(t.get("current_position", 0)),
            "tension: " + str(round(t.get("tension_level", 0.5), 2)),
        ]
        tones = t.get("tone_trajectory", [])
        if tones:
            segments.append("tone: " + " → ".join(tones[-3:]))
        rows.append(" | ".join(segments))
    return "\n".join(rows)
def get_all_knots():
    """Render every knot with its pivot type and member threads."""
    knots = mnemo.list_knots()
    if not knots:
        return "_No knots_"
    rows = []
    for knot in knots:
        row = "* **" + knot["id"] + "** " + knot["name"]
        row += " [" + knot.get("pivot_type", "collision") + "] threads: " + ", ".join(knot.get("threads", []))
        if knot.get("reason"):
            row += " | " + knot["reason"][:60]
        rows.append(row)
    return "\n".join(rows)
def get_active_threads_fn():
    """Render the detail view of active threads (position, tension, tone).

    Robustness fix: the original indexed ``current_position``, ``points``
    and ``tension_level`` directly, while the sibling get_all_threads()
    reads the same fields defensively with ``dict.get``. Use the same
    defaults here so a thread record missing an optional field cannot
    raise KeyError.
    """
    threads = mnemo.get_active_threads()
    if not threads:
        return "No active threads"
    output = ["### Active Threads (" + str(len(threads)) + ")\n"]
    for t in threads:
        line = "* **" + t["name"] + "** (" + t["id"] + ")"
        line += "\n Position: " + str(t.get("current_position", 0)) + "/" + str(len(t.get("points", [])))
        line += " | Tension: " + str(t.get("tension_level", 0.5))
        if t.get("tone_trajectory"):
            # Show at most the three most recent tones.
            line += "\n Tone: " + " → ".join(t["tone_trajectory"][-3:])
        output.append(line)
    return "\n".join(output)
| # ============================================================================ | |
| # GRADIO UI BLOCKS | |
| # ============================================================================ | |
# NOTE(review): the paste lost all indentation; the nesting below is
# reconstructed from Gradio layout-manager semantics (Blocks/Tab/Row/Column/
# Accordion context managers) — confirm against the original Space file.
with gr.Blocks(title="Mnemo v7.0 - AI Agent Memory System") as demo:
    # Page header.
    gr.Markdown("# Mnemo v7.0 - AI Agent Memory System\n\n"
                "**SQLite + FTS5 + FAISS + NumPy | ConnectionPoints | Threads & Knots | "
                "Graph Search | Two-Stage Retrieval**")

    # --- Tab: legacy blob memories (add / delete / list / maintenance) ---
    with gr.Tab("Memory Store"):
        with gr.Row():
            with gr.Column(scale=2):
                memory_input = gr.Textbox(label="New Memory",
                                          placeholder="Enter content (short facts OK if high-value)...", lines=2)
                priority_slider = gr.Slider(0.0, 2.0, value=1.0, step=0.1,
                                            label="Priority (resists decay)")
                add_btn = gr.Button("Add Memory", variant="primary")
                add_status = gr.Markdown()
                gr.Markdown("---")
                delete_input = gr.Textbox(label="Delete by ID", placeholder="mem_abc123...")
                delete_btn = gr.Button("Delete Memory")
            with gr.Column(scale=3):
                memories_display = gr.Markdown(label="Stored Memories")
        with gr.Row():
            init_btn = gr.Button("Load Examples")
            maintenance_btn = gr.Button("Run Maintenance")
            clear_btn = gr.Button("Clear All (WARNING: Syncs to Dataset!)", variant="stop")
        maintenance_status = gr.Markdown()
        # Event wiring for this tab.
        add_btn.click(add_memory, inputs=[memory_input, priority_slider],
                      outputs=[add_status, memories_display])
        delete_btn.click(delete_memory_by_id, inputs=[delete_input],
                         outputs=[add_status, memories_display])
        init_btn.click(initialize_demo, outputs=[add_status, memories_display])
        maintenance_btn.click(run_maintenance, outputs=[maintenance_status])
        clear_btn.click(clear_memories, outputs=[add_status, memories_display])

    # --- Tab: structured connection points (entity/type/value/reason) ---
    with gr.Tab("Connection Points"):
        gr.Markdown("### Structured Memory — Atomic Entity/Attribute/Value/Reason Records\n"
                    "Each point stores ONE fact with narrative context. "
                    "Enables precise graph search instead of blob similarity.")
        with gr.Row():
            with gr.Column(scale=2):
                cp_entity = gr.Textbox(label="Entity", placeholder="e.g., Alistair Fitzroy")
                cp_type = gr.Textbox(label="Point Type", placeholder="e.g., fears, brother_of, tone")
                cp_value = gr.Textbox(label="Value", placeholder="e.g., developing dementia", lines=2)
                cp_connects = gr.Textbox(label="Connects To (optional)", placeholder="e.g., Elijah")
                cp_reason = gr.Textbox(label="Reason (WHY it matters)", placeholder="e.g., drives obsession with legacy", lines=2)
                cp_weight = gr.Slider(0.0, 1.0, value=0.5, step=0.1, label="Weight (narrative importance)")
                cp_category = gr.Dropdown(["character", "plot", "setting", "theme", "tone", "style", "fact"],
                                          value="character", label="Category")
                cp_source = gr.Dropdown(["manual", "auto_extract", "file_upload", "signal", "consolidation"],
                                        value="manual", label="Source")
                cp_add_btn = gr.Button("Add Connection Point", variant="primary")
                cp_status = gr.Markdown()
                gr.Markdown("---")
                cp_delete_input = gr.Textbox(label="Delete CP by ID", placeholder="cp_abc123...")
                cp_delete_btn = gr.Button("Delete Point")
            with gr.Column(scale=3):
                points_display = gr.Markdown(label="Connection Points")
        cp_add_btn.click(add_connection_point,
                         inputs=[cp_entity, cp_type, cp_value, cp_connects, cp_reason, cp_weight, cp_category, cp_source],
                         outputs=[cp_status, points_display])
        cp_delete_btn.click(delete_point_by_id, inputs=[cp_delete_input],
                            outputs=[cp_status, points_display])

    # --- Tab: graph-first retrieval and per-entity lookup ---
    with gr.Tab("Graph Search"):
        gr.Markdown("### Entity Graph Traversal (Primary) + FAISS Fallback\n"
                    "Searches by entity name and relationship type first. "
                    "Falls back to embedding similarity only when graph returns < 3 results.")
        with gr.Row():
            with gr.Column():
                gs_query = gr.Textbox(label="Query", placeholder="e.g., Alistair relationship with Elijah")
                gs_topk = gr.Slider(1, 30, value=15, step=1, label="Max Results")
                gs_btn = gr.Button("Graph Search", variant="primary")
            with gr.Column():
                entity_input = gr.Textbox(label="Entity Lookup", placeholder="e.g., Sebastian")
                entity_btn = gr.Button("Lookup Entity")
        gs_results = gr.Markdown()
        entity_results = gr.Markdown()
        gs_btn.click(graph_search, inputs=[gs_query, gs_topk], outputs=[gs_results])
        entity_btn.click(entity_lookup, inputs=[entity_input], outputs=[entity_results])

    # --- Tab: legacy blob similarity search ---
    with gr.Tab("Search (Legacy)"):
        with gr.Row():
            search_input = gr.Textbox(label="Query",
                                      placeholder="Search... (e.g. 'Sebastian captivity')")
            top_k_slider = gr.Slider(1, 20, value=15, step=1, label="Results Limit")
        search_btn = gr.Button("Search", variant="primary")
        search_results = gr.Markdown()
        search_btn.click(search_memories, inputs=[search_input, top_k_slider],
                         outputs=[search_results])

    # --- Tab: narrative threads and knots ---
    with gr.Tab("Threads & Knots"):
        gr.Markdown("### Narrative Threading — Ordered Sequences with Tension & Tone Tracking")
        with gr.Accordion("Create Thread", open=False):
            with gr.Row():
                t_id = gr.Textbox(label="Thread ID", placeholder="thread_sebastian_captivity")
                t_name = gr.Textbox(label="Name", placeholder="Sebastian's Captivity Arc")
                t_entity = gr.Textbox(label="Entity", placeholder="Sebastian")
                t_type = gr.Dropdown(["character_arc", "plot_line", "theme_thread", "relationship_arc"],
                                     value="plot_line", label="Thread Type")
            t_points = gr.Textbox(label="Point IDs (comma-separated)", placeholder="cp_abc, cp_def, ...")
            t_btn = gr.Button("Create Thread", variant="primary")
            t_status = gr.Markdown()
        with gr.Accordion("Trace Thread", open=False):
            with gr.Row():
                trace_id = gr.Textbox(label="Thread ID", placeholder="thread_sebastian_captivity")
                trace_dir = gr.Dropdown(["back", "forward"], value="back", label="Direction")
                trace_steps = gr.Slider(1, 10, value=5, step=1, label="Steps")
                trace_pos = gr.Number(label="From Position (-1 = current)", value=-1)
            trace_btn = gr.Button("Trace")
            trace_results = gr.Markdown()
        with gr.Accordion("Active Threads", open=True):
            active_btn = gr.Button("Refresh Active Threads")
            active_display = gr.Markdown()
        gr.Markdown("---")
        gr.Markdown("### Knots — Where Threads Cross")
        with gr.Accordion("Create Knot", open=False):
            with gr.Row():
                k_id = gr.Textbox(label="Knot ID", placeholder="knot_blackwood_arrival")
                k_name = gr.Textbox(label="Name", placeholder="Arrival at Blackwood Estate")
            k_threads = gr.Textbox(label="Thread IDs (comma-separated)", placeholder="thread_captivity, thread_isabella")
            k_pivot = gr.Dropdown(["collision", "revelation", "betrayal", "convergence", "divergence", "escalation"],
                                  value="collision", label="Pivot Type")
            k_reason = gr.Textbox(label="Reason", placeholder="Why do these threads cross here?", lines=2)
            k_btn = gr.Button("Create Knot", variant="primary")
            k_status = gr.Markdown()
        with gr.Accordion("Inspect Knot", open=False):
            knot_inspect_id = gr.Textbox(label="Knot ID", placeholder="knot_blackwood_arrival")
            knot_inspect_btn = gr.Button("Get Knot Context")
            knot_detail = gr.Markdown()
        threads_display = gr.Markdown(label="All Threads")
        knots_display = gr.Markdown(label="All Knots")
        # Event wiring for threads and knots.
        t_btn.click(create_thread, inputs=[t_id, t_name, t_entity, t_type, t_points],
                    outputs=[t_status, threads_display])
        trace_btn.click(trace_thread_fn, inputs=[trace_id, trace_dir, trace_steps, trace_pos],
                        outputs=[trace_results])
        active_btn.click(get_active_threads_fn, outputs=[active_display])
        k_btn.click(create_knot, inputs=[k_id, k_name, k_threads, k_pivot, k_reason],
                    outputs=[k_status, knots_display])
        knot_inspect_btn.click(get_knot_detail, inputs=[knot_inspect_id], outputs=[knot_detail])

    # --- Tab: injection decision + context preview ---
    with gr.Tab("Smart Injection"):
        gr.Markdown("### Memory Utility Predictor\n\n"
                    "Cosine-similarity confidence with keyword boost. "
                    "Decides when memory injection helps vs. hurts.")
        with gr.Row():
            with gr.Column():
                inj_query = gr.Textbox(label="Query", lines=2)
                inj_context = gr.Textbox(label="Context (optional)", lines=1)
                check_btn = gr.Button("Check", variant="primary")
            with gr.Column():
                inj_result = gr.Markdown()
        check_btn.click(check_injection, inputs=[inj_query, inj_context],
                        outputs=[inj_result])
        gr.Markdown("### Get Context")
        with gr.Row():
            ctx_query = gr.Textbox(label="Query", placeholder="Query for context...")
            ctx_k = gr.Slider(1, 20, value=15, step=1, label="Memories")
        ctx_btn = gr.Button("Get Context")
        ctx_output = gr.Markdown()
        ctx_btn.click(get_context_for_injection, inputs=[ctx_query, ctx_k],
                      outputs=[ctx_output])

    # --- Tab: engine statistics ---
    with gr.Tab("Stats"):
        stats_btn = gr.Button("Refresh")
        stats_display = gr.Markdown()
        stats_btn.click(get_stats, outputs=[stats_display])

    # --- Tab: static architecture notes ---
    with gr.Tab("About"):
        gr.Markdown("""## Mnemo v7.0 Architecture
### What Changed from v6.5
**SQLite (WAL mode)** — Source of truth. ACID transactions, zero-config, single-file backup.
Replaces all in-memory dict storage. Embeddings stored as raw BLOB (not base64 JSON).
**FTS5 Full-Text Search** — Porter stemming, BM25 ranking, automatic sync triggers.
Replaces manual keyword extraction. Searches entity names, values, connections, reasons.
**Two-Stage Retrieval** — FAISS pre-filters 10K→200 candidates (~5ms), then NumPy
re-ranks with exact cosine similarity (~1ms). Fixes the accuracy gap from IndexIDMap drift.
**Disposable FAISS** — Rebuilt from SQLite on startup. No IndexIDMap wrapper, no remove_ids()
fragmentation, no str↔int mapping drift. Plain IndexFlatIP with positional id_map.
**SQLite Indices** — Replace EntityIndex hash maps. `COLLATE NOCASE` eliminates manual
normalization. `DELETE CASCADE` replaces O(n²) session cleanup.
**Direct .db Upload** — SQLite file uploaded to HF Datasets as-is. No serialize/deserialize
step. WAL checkpoint before upload. 10x faster sync at scale.
### Backward Compatibility
All v6.5 API endpoints preserved with identical signatures. Gradio API, MCP server,
and Streamlit client require zero changes.
""")
    # =================================================================
    # MCP-ONLY API ENDPOINTS (gr.api)
    # Fully mapped to mnemo_client.py expectations.
    # PROPERLY INDENTED inside `with gr.Blocks() as demo:`
    #
    # NOTE(review): these endpoints call mnemo.engine.* while the UI
    # handlers above call mnemo.* directly, despite the module docstring's
    # "public API methods only" claim — confirm that `engine` is an
    # intended public surface of PersistentMnemo.
    # All parameters arrive as strings (gr.api convention here) and every
    # endpoint returns a JSON-encoded string.
    # =================================================================
    # --- BLOB MEMORY APIs ---
    def add_api(content: str, namespace: str, metadata_json: str, priority: str) -> str:
        # metadata_json: optional JSON object; empty string means no metadata.
        meta = json.loads(metadata_json) if metadata_json else {}
        mem_id = mnemo.engine.add(content, namespace, meta, float(priority))
        return json.dumps({"id": mem_id} if mem_id else {"error": "Quality too low"})
    gr.api(add_api, api_name="add_api")
    def search_api(query: str, limit: str, namespace: str) -> str:
        # Empty namespace string means "search all namespaces".
        results = mnemo.engine.search(query, int(limit), namespace if namespace else None)
        return json.dumps([r.to_dict() for r in results], default=str)
    gr.api(search_api, api_name="search_api")
    def should_inject_api(query: str, context: str, history: str) -> str:
        should, reason, conf = mnemo.engine.should_inject(query, context, history)
        return json.dumps({"should_inject": should, "reason": reason, "confidence": conf})
    gr.api(should_inject_api, api_name="should_inject_api")
    def get_context_api(query: str, limit: str) -> str:
        ctx = mnemo.engine.get_context(query, int(limit))
        return json.dumps({"context": ctx})
    gr.api(get_context_api, api_name="get_context_api")
    def get_memory_api(memory_id: str) -> str:
        mem = mnemo.engine.get(memory_id)
        return json.dumps(mem if mem else {"error": "Not found"}, default=str)
    gr.api(get_memory_api, api_name="get_memory")
    def delete_memory_api(memory_id: str) -> str:
        success = mnemo.engine.delete(memory_id)
        return json.dumps({"deleted": success})
    gr.api(delete_memory_api, api_name="delete_memory_api")
    def list_memories_api(namespace: str = "") -> str:
        # Namespace filtering happens client-side over the full listing.
        memories = mnemo.engine.list_all()
        if namespace: memories = [m for m in memories if m.get("namespace") == namespace]
        return json.dumps(memories, default=str)
    gr.api(list_memories_api, api_name="list_memories")
    def stats_api() -> str:
        return json.dumps(mnemo.engine.get_stats(), default=str)
    gr.api(stats_api, api_name="stats_api")
    def maintenance_api() -> str:
        return json.dumps(mnemo.engine.maintenance(), default=str)
    gr.api(maintenance_api, api_name="maintenance_api")
    def clear_api(confirm: str) -> str:
        # Destructive: requires the literal string "true" (case-insensitive).
        if confirm.lower() == "true":
            mnemo.engine.clear()
            return json.dumps({"cleared": True})
        return json.dumps({"cleared": False})
    gr.api(clear_api, api_name="clear_api")
    # --- CONNECTION POINT APIs ---
    def add_point_api(entity: str, point_type: str, value: str, connects_to: str, reason: str, weight: str, category: str, session_id: str, source: str, thread_id: str, position: str, namespace: str = "default") -> str:
        cp_id = mnemo.engine.add_point(
            entity=entity, point_type=point_type, value=value,
            connects_to=connects_to, reason=reason, weight=float(weight),
            category=category, session_id=session_id, source=source,
            thread_id=thread_id, position=int(position), namespace=namespace
        )
        return json.dumps({"id": cp_id} if cp_id else {"error": "Failed"})
    gr.api(add_point_api, api_name="add_point_api")
    def add_points_batch_api(points_json: str) -> str:
        # points_json: JSON array of point dicts; errors are reported, not raised.
        try:
            ids = mnemo.engine.add_points_batch(json.loads(points_json))
            return json.dumps({"created": ids, "count": len(ids)})
        except Exception as e: return json.dumps({"error": str(e)})
    gr.api(add_points_batch_api, api_name="add_points_batch")
    def graph_search_api(query: str, top_k: str, active_sessions_json: str = "[]") -> str:
        # Decode the JSON list back into a Python list
        active_sessions = json.loads(active_sessions_json) if active_sessions_json and active_sessions_json != "[]" else None
        results = mnemo.engine.graph_search(query, int(top_k), active_sessions=active_sessions)
        return json.dumps(results, default=str)
    gr.api(graph_search_api, api_name="graph_search_api")
    def entity_lookup_api(entity: str) -> str:
        points = mnemo.engine.entity_lookup(entity)
        return json.dumps(points, default=str)
    gr.api(entity_lookup_api, api_name="entity_lookup_api")
    def get_point_api(cp_id: str) -> str:
        cp = mnemo.engine.get_point(cp_id)
        return json.dumps(cp if cp else {"error": "Not found"}, default=str)
    gr.api(get_point_api, api_name="get_point")
    def delete_point_api(cp_id: str) -> str:
        return json.dumps({"deleted": mnemo.engine.delete_point(cp_id)})
    gr.api(delete_point_api, api_name="delete_point_api")
    def update_point_api(updates_json: str) -> str:
        # updates_json must contain "cp_id"; remaining keys are field updates.
        try:
            updates = json.loads(updates_json)
            cp_id = updates.pop("cp_id", "")
            if not cp_id:
                return json.dumps({"error": "cp_id required"})
            # Convert weight to float if present
            if "weight" in updates and updates["weight"] is not None:
                updates["weight"] = float(updates["weight"])
            result = mnemo.engine.update_point(cp_id, **updates)
            return json.dumps(result if result else {"error": "Not found"}, default=str)
        except Exception as e:
            return json.dumps({"error": str(e)})
    gr.api(update_point_api, api_name="update_point_api")
    def list_points_api(limit: str = "200") -> str:
        points = mnemo.engine.list_points(limit=int(limit))
        return json.dumps(points, default=str)
    gr.api(list_points_api, api_name="list_points")
    # --- THREAD & KNOT APIs ---
    def create_thread_api(thread_id: str, name: str, entity: str, thread_type: str, session_id: str, point_ids_json: str = "[]") -> str:
        point_ids = json.loads(point_ids_json) if point_ids_json else []
        tid = mnemo.engine.add_thread(thread_id, name, entity, thread_type, session_id, point_ids=point_ids)
        return json.dumps({"id": tid})
    gr.api(create_thread_api, api_name="create_thread_api")
    def advance_thread_api(thread_id: str, new_position: str) -> str:
        success = mnemo.engine.advance_thread(thread_id, int(new_position))
        return json.dumps({"status": "success"} if success else {"error": "Not found"})
    gr.api(advance_thread_api, api_name="advance_thread")
    def trace_thread_api(thread_id: str, direction: str, steps: str, from_position: str) -> str:
        points = mnemo.engine.trace_thread(thread_id, int(from_position), direction, int(steps))
        return json.dumps(points, default=str)
    gr.api(trace_thread_api, api_name="trace_thread_api")
    def active_threads_api() -> str:
        return json.dumps(mnemo.engine.get_active_threads(), default=str)
    gr.api(active_threads_api, api_name="active_threads_api")
    def get_thread_api(thread_id: str) -> str:
        t = mnemo.engine.get_thread(thread_id)
        return json.dumps(t if t else {"error": "Not found"}, default=str)
    gr.api(get_thread_api, api_name="get_thread")
    def delete_thread_api(thread_id: str) -> str:
        return json.dumps({"deleted": mnemo.engine.delete_thread(thread_id)})
    gr.api(delete_thread_api, api_name="delete_thread")
    def create_knot_api(knot_id: str, name: str, thread_ids_json: str, pivot_type: str, reason: str, session_id: str) -> str:
        kid = mnemo.engine.add_knot(knot_id, name, json.loads(thread_ids_json), pivot_type, reason, session_id)
        return json.dumps({"id": kid})
    gr.api(create_knot_api, api_name="create_knot_api")
    def knot_context_api(knot_id: str) -> str:
        ctx = mnemo.engine.get_knot_context(knot_id)
        return json.dumps(ctx if ctx else {"error": "Not found"}, default=str)
    gr.api(knot_context_api, api_name="knot_context_api")
    def list_knots_api() -> str:
        knots = mnemo.engine.list_knots()
        return json.dumps(knots, default=str)
    gr.api(list_knots_api, api_name="list_knots")
    def delete_knot_api(knot_id: str) -> str:
        return json.dumps({"deleted": mnemo.engine.delete_knot(knot_id)})
    gr.api(delete_knot_api, api_name="delete_knot")
    # --- SESSION CLEANUP ---
    def delete_session_api(session_id: str) -> str:
        # Deletes everything tied to a session; errors are returned, not raised.
        try:
            result = mnemo.engine.delete_session(session_id)
            return json.dumps(result)
        except Exception as e:
            return json.dumps({"error": str(e)})
    gr.api(delete_session_api, api_name="delete_session")
# =================================================================
# MAIN EXECUTION (Unindented, sits outside the blocks context)
# =================================================================
if __name__ == "__main__":
    # mcp_server=True presumably also exposes the gr.api endpoints as MCP
    # tools — confirm against the Gradio MCP documentation.
    demo.launch(server_name="0.0.0.0", server_port=7860, mcp_server=True)