# MnemoCore / config.yaml
# Uploaded by Granis87 via huggingface_hub (revision 7c8b011, verified)
---
# HAIM Configuration — Phase 4.5
# All hardcoded constants are centralized here.
haim:
  version: "4.5"
  dimensionality: 16384  # 2^14, must be a multiple of 64

  # Vector encoding
  encoding:
    mode: "binary"          # "binary" (Phase 3.0+) or "float" (legacy)
    token_method: "bundle"  # "bundle" (XOR+permute) or "hash"

  # Memory tier thresholds
  tiers:
    hot:
      max_memories: 3000
      ltp_threshold_min: 0.7
      eviction_policy: "lru"
    warm:
      max_memories: 100000
      ltp_threshold_min: 0.3
      consolidation_interval_hours: 1
      storage_backend: "mmap"  # "mmap" (Phase 3.0) or "qdrant" (Phase 3.5)
    cold:
      storage_backend: "filesystem"  # "filesystem" or "s3"
      compression: "gzip"
      archive_threshold_days: 30

  # LTP (Long-Term Potentiation) decay parameters
  ltp:
    initial_importance: 0.5
    decay_lambda: 0.01          # Exponential decay rate
    permanence_threshold: 0.95  # Above this, memory is immune to decay
    half_life_days: 30.0        # For synaptic connections

  # Hysteresis (prevent boundary thrashing between tiers)
  hysteresis:
    promote_delta: 0.15  # LTP must exceed threshold by this much to promote
    demote_delta: 0.10   # LTP must fall below threshold by this much to demote

  # Redis (Phase 3.5)
  redis:
    url: "redis://localhost:6379/0"
    stream_key: "haim:subconscious"
    max_connections: 10
    socket_timeout: 5

  # Qdrant (Phase 3.5)
  qdrant:
    url: "http://localhost:6333"
    collection_hot: "haim_hot"
    collection_warm: "haim_warm"
    binary_quantization: true
    always_ram: true
    hnsw_m: 16
    hnsw_ef_construct: 100

  # GPU (Phase 3.5)
  gpu:
    enabled: false
    device: "cuda:0"
    batch_size: 1000
    fallback_to_cpu: true

  # Observability (Phase 3.5)
  observability:
    metrics_port: 9090
    log_level: "INFO"
    structured_logging: true

  # Persistence paths
  paths:
    data_dir: "./data"
    memory_file: "./data/memory.jsonl"
    codebook_file: "./data/codebook.json"
    concepts_file: "./data/concepts.json"
    synapses_file: "./data/synapses.json"
    warm_mmap_dir: "./data/warm_tier"
    cold_archive_dir: "./data/cold_archive"

  # Security (Phase 3.5.1)
  # api_key was REMOVED from this file: set it via the HAIM_API_KEY
  # environment variable, or add it back here explicitly as a quoted string.
  # Explicit {} keeps this a mapping rather than a bare-key null.
  security: {}

  # MCP (Model Context Protocol) bridge
  mcp:
    enabled: true
    transport: "stdio"  # "stdio" recommended for local MCP clients
    host: "127.0.0.1"
    port: 8110
    api_base_url: "http://localhost:8100"
    timeout_seconds: 15
    allow_tools:
      - "memory_store"
      - "memory_query"
      - "memory_get"
      - "memory_delete"
      - "memory_stats"
      - "memory_health"

  # Dream Loop (Subconscious background processing)
  dream_loop:
    enabled: true
    frequency_seconds: 60  # Seconds between dream cycles
    batch_size: 10         # Number of memories to process per cycle
    max_iterations: 0      # Maximum iterations (0 = unlimited)
    subconscious_queue_maxlen: 10000  # Max queued IDs (null/0 = unlimited)
    ollama_url: "http://localhost:11434/api/generate"
    model: "gemma3:1b"

  # Phase 4.0+: Semantic Consolidation
  consolidation:
    enabled: true
    interval_seconds: 3600      # 1 hour between consolidation cycles
    similarity_threshold: 0.85  # Hamming similarity threshold (0.85 = 15% distance)
    min_cluster_size: 2         # Minimum cluster size for merging
    hot_tier_enabled: true      # Consolidate HOT tier
    warm_tier_enabled: true     # Consolidate WARM tier

  # Phase 4.1: XOR-based Project Isolation
  attention_masking:
    enabled: true  # Enable/disable project-based memory isolation

  # =========================================================================
  # Subconscious AI - BETA FEATURE
  # =========================================================================
  # This is a BETA feature that enables autonomous background AI processing
  # for memory management, dream synthesis, and micro-self-improvement.
  #
  # WARNING: This feature is experimental and may change without notice.
  # Must be explicitly enabled by setting 'enabled: true'.
  # All safety defaults are conservative - review before enabling in production.
  # =========================================================================
  subconscious_ai:
    # BETA FEATURE - Must be explicitly enabled
    enabled: true
    beta_mode: true

    # Model configuration
    model_provider: "ollama"  # ollama | lm_studio | openai_api | anthropic_api
    model_name: "phi3.5:latest"
    model_url: "http://localhost:11434"
    # api_key: null       # For API providers
    # api_base_url: null

    # Pulse configuration
    pulse_interval_seconds: 120
    pulse_backoff_enabled: true
    pulse_backoff_max_seconds: 600

    # Resource management
    max_cpu_percent: 30.0
    cycle_timeout_seconds: 120
    rate_limit_per_hour: 50

    # Operations
    memory_sorting_enabled: true
    enhanced_dreaming_enabled: true
    micro_self_improvement_enabled: true  # Conservative default is false; explicitly enabled here

    # Safety
    dry_run: false
    log_all_decisions: true
    audit_trail_path: "./data/subconscious_audit.jsonl"
    max_memories_per_cycle: 10