---
# HAIM service configuration.
# NOTE(review): the source of this file was garbled — every line carried a
# stray "| " prefix and all indentation was flattened. Structure below is
# reconstructed from blank-line grouping; confirm nesting against the
# consuming loader.
haim:
  version: "4.5"          # quoted: a bare 4.5 would parse as a float
  dimensionality: 16384   # presumably the hypervector width — TODO confirm

# Hypervector encoding settings.
encoding:
  mode: "binary"
  token_method: "bundle"

# Tiered memory store: hot (RAM-resident, LRU-evicted), warm (mmap-backed),
# cold (compressed archive on the filesystem).
tiers:
  hot:
    max_memories: 3000
    ltp_threshold_min: 0.7   # presumably the minimum LTP score to remain hot — TODO confirm
    eviction_policy: "lru"

  warm:
    max_memories: 100000
    ltp_threshold_min: 0.3
    consolidation_interval_hours: 1
    storage_backend: "mmap"

  cold:
    storage_backend: "filesystem"
    compression: "gzip"
    archive_threshold_days: 30

# LTP (memory importance) scoring and decay.
ltp:
  initial_importance: 0.5
  decay_lambda: 0.01
  permanence_threshold: 0.95   # presumably importance above this is exempt from decay — TODO confirm
  half_life_days: 30.0

# Hysteresis margins — presumably applied to tier promotion/demotion to
# prevent thrashing around the thresholds — TODO confirm.
hysteresis:
  promote_delta: 0.15
  demote_delta: 0.10

# Redis connection (carries the subconscious event stream).
redis:
  url: "redis://localhost:6379/0"
  stream_key: "haim:subconscious"
  max_connections: 10
  socket_timeout: 5   # presumably seconds — TODO confirm

# Qdrant vector database (hot and warm collections).
qdrant:
  url: "http://localhost:6333"
  collection_hot: "haim_hot"
  collection_warm: "haim_warm"
  binary_quantization: true
  always_ram: true
  hnsw_m: 16               # HNSW graph connectivity per node
  hnsw_ef_construct: 100   # HNSW build-time beam width

# Optional GPU acceleration.
gpu:
  enabled: false
  device: "cuda:0"
  batch_size: 1000
  fallback_to_cpu: true

# Metrics and logging.
observability:
  metrics_port: 9090
  log_level: "INFO"
  structured_logging: true

# On-disk layout (relative to the working directory).
paths:
  data_dir: "./data"
  memory_file: "./data/memory.jsonl"
  codebook_file: "./data/codebook.json"
  concepts_file: "./data/concepts.json"
  synapses_file: "./data/synapses.json"
  warm_mmap_dir: "./data/warm_tier"
  cold_archive_dir: "./data/cold_archive"

# Security settings.
# NOTE(review): this section was empty in the source. "{}" makes the empty
# mapping explicit — a bare "security:" parses as null, which some loaders
# reject. Confirm whether content was lost here.
security: {}

# MCP (Model Context Protocol) server endpoint.
mcp:
  enabled: true
  transport: "stdio"
  host: "127.0.0.1"
  port: 8110
  api_base_url: "http://localhost:8100"
  timeout_seconds: 15
  # Tools exposed to MCP clients (allow-list).
  allow_tools:
    - "memory_store"
    - "memory_query"
    - "memory_get"
    - "memory_delete"
    - "memory_stats"
    - "memory_health"

# Background "dream" processing loop, backed by an Ollama model.
dream_loop:
  enabled: true
  frequency_seconds: 60
  batch_size: 10
  max_iterations: 0   # presumably 0 means unbounded — TODO confirm
  subconscious_queue_maxlen: 10000
  ollama_url: "http://localhost:11434/api/generate"
  model: "gemma3:1b"

# Periodic memory consolidation (similarity-based clustering).
consolidation:
  enabled: true
  interval_seconds: 3600
  similarity_threshold: 0.85
  min_cluster_size: 2
  hot_tier_enabled: true
  warm_tier_enabled: true

# Attention masking feature toggle.
attention_masking:
  enabled: true

# Subconscious AI background agent (beta).
subconscious_ai:
  enabled: true
  beta_mode: true

  # Model backend.
  model_provider: "ollama"
  model_name: "phi3.5:latest"
  model_url: "http://localhost:11434"

  # Pulse scheduling.
  pulse_interval_seconds: 120
  pulse_backoff_enabled: true
  pulse_backoff_max_seconds: 600

  # Resource limits per cycle.
  max_cpu_percent: 30.0
  cycle_timeout_seconds: 120
  rate_limit_per_hour: 50

  # Feature toggles.
  memory_sorting_enabled: true
  enhanced_dreaming_enabled: true
  micro_self_improvement_enabled: true

  # Safety and auditing.
  dry_run: false
  log_all_decisions: true
  audit_trail_path: "./data/subconscious_audit.jsonl"
  max_memories_per_cycle: 10