// SPFsmartGATE/src/learning.rs
// SPF Smart Gateway - Online Learning Engine (Block M)
// Copyright 2026 Joseph Stone - All Rights Reserved
//
// Online EWC (Elastic Weight Consolidation) + Experience Replay + LR Scheduling.
// Gate-as-teacher reinforcement: approve/deny = training labels.
// Train-on-copy pattern: inference never blocked by training.
// Confidence bands: auto-allow / ask-user / auto-block thresholds.
// FP-locked replay: false positive examples never evicted.
//
// Depends on: tensor.rs (Layer 0), train.rs (Block L), gate_training.rs (Block J)
//
// Research basis: CLONE_RESEARCH_FINDINGS.txt
// - Online EWC: markaicode.com/elastic-weight-consolidation
// - Experience replay: 1000-5000 buffer, 50/50 mix, FP-locked slots
// - Train-on-copy: neural-redis pattern (atomic weight merge)
// - LR scheduling: warmup + cosine annealing
// - Convergence detection: 95%+ alignment for 1000 decisions
use crate::tensor::Tensor;
use crate::train::TrainingExample;
#[cfg(test)]
use crate::train::TrainingTarget;
use crate::gate_training::TrainingSignal;
// FL-9: LRScheduler removed — gate is deterministic, no warmup needed.
// Learning rate comes directly from TransformerConfig.learning_rate.
// ============================================================================
// ELASTIC WEIGHT CONSOLIDATION (Online EWC)
// ============================================================================
/// Online EWC: penalizes changes to important weights.
///
/// Loss_total = task_loss + 0.5 * lambda * SUM(F_i * (theta_i - theta*_i)^2)
///
/// Online variant: F_new = decay * F_old + F_current
/// Memory: param_count × 4 bytes × 2 (Fisher + reference) = ~40MB for 5M params
pub struct OnlineEWC {
    /// Diagonal Fisher information: per-parameter importance estimates.
    pub fisher: Vec<f32>,
    /// Reference weights (theta*) captured by the last snapshot.
    pub reference_weights: Vec<f32>,
    /// Penalty strength (the EWC lambda hyperparameter).
    pub lambda: f32,
    /// EMA decay for the online Fisher update (0.9 by construction).
    pub fisher_decay: f32,
    /// False until a snapshot exists; penalty() returns zeros while false.
    pub active: bool,
    /// Number of update_fisher() calls applied so far.
    pub update_count: u64,
}
impl OnlineEWC {
    /// Create an inactive regularizer sized for `total_params` parameters.
    /// Becomes active after the first `snapshot_weights()` call.
    pub fn new(total_params: usize, lambda: f32) -> Self {
        Self {
            fisher: vec![0.0; total_params],
            reference_weights: vec![0.0; total_params],
            lambda,
            fisher_decay: 0.9,
            active: false,
            update_count: 0,
        }
    }

    /// Compute the EWC penalty loss and its gradient w.r.t. `current_weights`.
    ///
    /// loss   = 0.5 * lambda * SUM(F_i * (theta_i - theta*_i)^2)
    /// grad_i = lambda * F_i * (theta_i - theta*_i)
    ///
    /// Returns (0.0, zeros) while inactive (no reference snapshot yet).
    pub fn penalty(&self, current_weights: &[f32]) -> (f32, Vec<f32>) {
        if !self.active {
            return (0.0, vec![0.0; current_weights.len()]);
        }
        let mut loss = 0.0f32;
        let mut grads = vec![0.0f32; current_weights.len()];
        // Iterate only the overlap so a weight-count mismatch never panics.
        for i in 0..current_weights.len().min(self.fisher.len()) {
            let diff = current_weights[i] - self.reference_weights[i];
            loss += self.fisher[i] * diff * diff;
            // BUGFIX: d/d(theta_i) of 0.5*lambda*F_i*diff^2 is lambda*F_i*diff.
            // The previous `2.0 * lambda * F_i * diff` doubled the gradient
            // relative to the 0.5-scaled loss returned below.
            grads[i] = self.lambda * self.fisher[i] * diff;
        }
        (0.5 * self.lambda * loss, grads)
    }

    /// Online Fisher update: F = decay * F_old + (1 - decay) * grad^2
    /// (an EMA of squared gradients — the diagonal Fisher approximation).
    pub fn update_fisher(&mut self, gradients: &[f32]) {
        let decay = self.fisher_decay;
        for i in 0..self.fisher.len().min(gradients.len()) {
            let new_fisher = gradients[i] * gradients[i];
            self.fisher[i] = decay * self.fisher[i] + (1.0 - decay) * new_fisher;
        }
        self.update_count += 1;
    }

    /// Snapshot `weights` as the reference theta* and activate the penalty.
    pub fn snapshot_weights(&mut self, weights: &[f32]) {
        self.reference_weights = weights.to_vec();
        self.active = true;
    }

    /// Heap footprint of the two f32 buffers (Fisher + reference), in bytes.
    pub fn memory_bytes(&self) -> usize {
        (self.fisher.len() + self.reference_weights.len()) * 4
    }

    /// Export state as (fisher, reference_weights, lambda, update_count).
    pub fn save_state(&self) -> (Vec<f32>, Vec<f32>, f32, u64) {
        (self.fisher.clone(), self.reference_weights.clone(), self.lambda, self.update_count)
    }

    /// Restore state previously produced by `save_state()`.
    /// Reactivates only if the reference holds at least one nonzero weight.
    pub fn load_state(&mut self, fisher: Vec<f32>, ref_weights: Vec<f32>, lambda: f32, count: u64) {
        self.fisher = fisher;
        self.reference_weights = ref_weights;
        self.lambda = lambda;
        self.update_count = count;
        self.active = !self.reference_weights.is_empty()
            && self.reference_weights.iter().any(|&w| w != 0.0);
    }

    /// Export Fisher information as Tensor (for mesh weight sync / federated EWC).
    pub fn fisher_as_tensor(&self) -> Tensor {
        Tensor::from_data(self.fisher.clone(), vec![self.fisher.len()])
            .unwrap_or_else(|_| Tensor::zeros(&[self.fisher.len()]))
    }

    /// FL-4: Save EWC state to a binary file for persistence across restarts.
    /// Format: [u32:param_count][f32:lambda][u64:update_count][f32xN:fisher][f32xN:ref_weights]
    /// NOTE: fisher_decay is not persisted; load restores the default (0.9).
    pub fn save_to_file(&self, path: &std::path::Path) -> std::io::Result<()> {
        use std::io::Write;
        // BufWriter batches the per-value 4-byte writes into large syscalls.
        let mut f = std::io::BufWriter::new(std::fs::File::create(path)?);
        let count = self.fisher.len() as u32;
        f.write_all(&count.to_le_bytes())?;
        f.write_all(&self.lambda.to_le_bytes())?;
        f.write_all(&self.update_count.to_le_bytes())?;
        for &v in &self.fisher {
            f.write_all(&v.to_le_bytes())?;
        }
        for &v in &self.reference_weights {
            f.write_all(&v.to_le_bytes())?;
        }
        f.flush()
    }

    /// FL-4: Load EWC state from a binary file written by `save_to_file`.
    /// Rejects files whose size does not match the declared parameter count,
    /// so a corrupted header cannot trigger an absurd allocation.
    pub fn load_from_file(path: &std::path::Path) -> std::io::Result<Self> {
        use std::io::Read;
        let file = std::fs::File::open(path)?;
        let mut f = std::io::BufReader::new(file);
        let mut buf4 = [0u8; 4];
        let mut buf8 = [0u8; 8];
        f.read_exact(&mut buf4)?;
        let count = u32::from_le_bytes(buf4) as usize;
        // Header is 4 (count) + 4 (lambda) + 8 (update_count) = 16 bytes;
        // payload is two f32 vectors of `count` elements each.
        let expected = 16u64 + (count as u64) * 8;
        let actual = f.get_ref().metadata()?.len();
        if actual != expected {
            return Err(std::io::Error::new(
                std::io::ErrorKind::InvalidData,
                format!(
                    "EWC file size {} does not match param count {} (expected {})",
                    actual, count, expected
                ),
            ));
        }
        f.read_exact(&mut buf4)?;
        let lambda = f32::from_le_bytes(buf4);
        f.read_exact(&mut buf8)?;
        let update_count = u64::from_le_bytes(buf8);
        let mut fisher = vec![0.0f32; count];
        for v in &mut fisher {
            f.read_exact(&mut buf4)?;
            *v = f32::from_le_bytes(buf4);
        }
        let mut reference_weights = vec![0.0f32; count];
        for v in &mut reference_weights {
            f.read_exact(&mut buf4)?;
            *v = f32::from_le_bytes(buf4);
        }
        // Same activation rule as load_state(): any nonzero reference weight.
        let active = reference_weights.iter().any(|&w| w != 0.0);
        Ok(Self {
            fisher,
            reference_weights,
            lambda,
            fisher_decay: 0.9,
            active,
            update_count,
        })
    }
}
// ============================================================================
// EXPERIENCE REPLAY BUFFER — with FP-locked slots
// ============================================================================
/// Ring buffer with FP-locked slots that never get evicted.
///
/// Regular examples cycle out when full. FP examples are permanent —
/// they represent security failures and are the most valuable training data.
/// FP-locked signals are included in EVERY training batch.
pub struct ExperienceReplay {
    /// Regular ring buffer (cycled when full)
    buffer: Vec<TrainingExample>,
    /// FP-locked examples (NEVER evicted)
    fp_locked: Vec<TrainingExample>,
    /// Maximum size of the regular ring buffer (FP store is unbounded).
    capacity: usize,
    /// Next overwrite index into `buffer` once it has reached capacity.
    write_pos: usize,
    /// Lifetime count of add() calls (regular + FP combined).
    total_added: u64,
}
impl ExperienceReplay {
    /// Create a replay buffer. `capacity` bounds only the regular ring
    /// buffer; the FP-locked store grows without bound.
    pub fn new(capacity: usize) -> Self {
        Self {
            buffer: Vec::with_capacity(capacity),
            fp_locked: Vec::new(),
            capacity,
            write_pos: 0,
            total_added: 0,
        }
    }

    /// Add an example. FP examples (weight >= 4.0) go to the locked store;
    /// regular examples cycle through the ring buffer, oldest evicted first.
    pub fn add(&mut self, example: TrainingExample) {
        if example.weight >= 4.0 {
            // FP-locked: never evicted
            self.fp_locked.push(example);
        } else if self.capacity > 0 {
            // While filling, write_pos tracks len, so once full the
            // overwrite cursor wraps to index 0 — the oldest entry.
            if self.buffer.len() < self.capacity {
                self.buffer.push(example);
            } else {
                self.buffer[self.write_pos] = example;
            }
            self.write_pos = (self.write_pos + 1) % self.capacity;
        }
        // BUGFIX: capacity == 0 previously hit an out-of-bounds index and a
        // modulo-by-zero panic; regular examples are now simply dropped
        // (still counted in total_added), matching fill_ratio's max(1) guard.
        self.total_added += 1;
    }

    /// Sample up to `n` random regular examples (with replacement,
    /// deterministic for a given `seed`) plus ALL FP-locked examples.
    pub fn sample(&self, n: usize, seed: u64) -> Vec<TrainingExample> {
        let mut samples = Vec::new();
        // Every batch always carries the full FP-locked set.
        samples.extend(self.fp_locked.iter().cloned());
        // Sample from the regular buffer.
        if !self.buffer.is_empty() {
            let count = n.min(self.buffer.len());
            let mut state = seed;
            for _ in 0..count {
                state = xorshift64(state);
                let idx = (state as usize) % self.buffer.len();
                samples.push(self.buffer[idx].clone());
            }
        }
        samples
    }

    /// Total stored examples (regular + FP-locked).
    pub fn len(&self) -> usize {
        self.buffer.len() + self.fp_locked.len()
    }

    /// Count of examples in the regular ring buffer only.
    pub fn regular_len(&self) -> usize {
        self.buffer.len()
    }

    /// Count of permanently retained FP examples.
    pub fn fp_locked_len(&self) -> usize {
        self.fp_locked.len()
    }

    pub fn is_empty(&self) -> bool {
        self.buffer.is_empty() && self.fp_locked.is_empty()
    }

    /// How full the regular ring buffer is, 0.0..=1.0 (FP store excluded).
    pub fn fill_ratio(&self) -> f32 {
        self.buffer.len() as f32 / self.capacity.max(1) as f32
    }

    /// Lifetime number of add() calls, including evicted examples.
    pub fn total_added(&self) -> u64 {
        self.total_added
    }
}
/// One step of Marsaglia's xorshift64 PRNG (shifts 13, 7, 17).
/// Zero is a fixed point of xorshift, so it is remapped to a nonzero seed.
fn xorshift64(state: u64) -> u64 {
    let mut x = if state == 0 { 0xdeadbeef } else { state };
    x ^= x << 13;
    x ^= x >> 7;
    x ^= x << 17;
    x
}
// ============================================================================
// CONFIDENCE DECISION — auto-allow / ask-user / auto-block
// ============================================================================
/// Thresholds mapping a transformer confidence score (0.0 to 1.0) onto
/// automatic versus manual gate decisions.
#[derive(Debug, Clone)]
pub struct ConfidenceConfig {
    /// Scores at or above this are auto-allowed (default: 0.8)
    pub allow_threshold: f32,
    /// Scores at or below this are auto-blocked (default: 0.2)
    pub block_threshold: f32,
    // Anything strictly between the thresholds: ask the user.
}

impl Default for ConfidenceConfig {
    fn default() -> Self {
        Self { allow_threshold: 0.8, block_threshold: 0.2 }
    }
}

/// Outcome of a confidence-based gate decision; carries the raw score.
#[derive(Debug, Clone, PartialEq)]
pub enum ConfidenceDecision {
    /// Model is confident: auto-allow (confidence > allow_threshold)
    AutoAllow(f32),
    /// Model is confident: auto-block (confidence < block_threshold)
    AutoBlock(f32),
    /// Model is uncertain: ask the user (confidence between thresholds)
    AskUser(f32),
}

impl ConfidenceConfig {
    /// Map a raw confidence score to a gate decision.
    /// Boundary values count as confident (>= allow, <= block).
    pub fn decide(&self, confidence: f32) -> ConfidenceDecision {
        match confidence {
            c if c >= self.allow_threshold => ConfidenceDecision::AutoAllow(c),
            c if c <= self.block_threshold => ConfidenceDecision::AutoBlock(c),
            c => ConfidenceDecision::AskUser(c),
        }
    }
}
// FL-9: LearningConfig and LearningController removed — dead code.
// Training is driven by handle_train() (FL-2) reading tlog:* from LMDB.
// EWC is wired directly in handle_train() (FL-4).
// Batch trigger is in run_router() (FL-10).
/// Convert a TrainingSignal to token IDs for transformer input.
/// Layout: [TOOL] tool_tokens [GATE] source_tokens [SPF] preceding_tools,
/// then one frequency token per recent call (capped at 10).
/// Uses BPE tokenizer (Block B) for proper subword encoding.
pub fn signal_to_tokens(signal: &TrainingSignal) -> Vec<usize> {
    use crate::tokenizer::{Tokenizer, GATE_ID, SPF_ID, TOOL_ID};
    let bpe = Tokenizer::new();
    // [TOOL] marker followed by the BPE-encoded tool name.
    let mut ids: Vec<usize> = vec![TOOL_ID as usize];
    ids.extend(bpe.encode(&signal.tool).iter().map(|&t| t as usize));
    // [GATE] marker followed by the BPE-encoded request source.
    ids.push(GATE_ID as usize);
    ids.extend(bpe.encode(&signal.source).iter().map(|&t| t as usize));
    // Sequence context: each preceding tool, delimited by [SPF] separators.
    if !signal.preceding_tools.is_empty() {
        ids.push(SPF_ID as usize);
        for earlier in &signal.preceding_tools {
            ids.extend(bpe.encode(earlier).iter().map(|&t| t as usize));
            ids.push(SPF_ID as usize);
        }
    }
    // Recent call frequency is encoded unary — one token (id 6) per call,
    // capped at 10 — so a hot tool visibly repeats in the model's input.
    for _ in 0..signal.recent_call_count.min(10) {
        ids.push(6_usize);
    }
    ids
}
// FL-9: LearningStatus removed — metrics reported directly from TransformerState
// and EWC/ConfusionMatrix in LMDB (FL-4, FL-5).
// ============================================================================
// MESH STREAM HANDLER — BrainSync
// ============================================================================
/// Handle an incoming BrainSync mesh frame.
/// Receives knowledge-sharing signals from peer nodes (training signals,
/// experience replay data, confusion matrix updates).
/// Parses JSON payload, validates structure, returns acknowledgment.
/// Zero silent drops.
///
/// Called from: mesh.rs stream_router() for StreamType::BrainSync (0x06)
pub fn handle_brain_sync(
    frame: &crate::framing::Frame,
    peer_key: &str,
    _transformer: &Option<std::sync::Arc<std::sync::RwLock<crate::transformer_tools::TransformerState>>>,
) -> Option<crate::framing::Frame> {
    // Short, log-friendly peer tag. BUGFIX: the previous byte slice
    // `&peer_key[..8.min(peer_key.len())]` panics if byte 8 is not a UTF-8
    // char boundary; `get(..8)` returns None in that case (and for short
    // keys), so we fall back to the whole key instead of panicking.
    let tag = peer_key.get(..8).unwrap_or(peer_key);
    // Shared constructor for the brain_sync_error reply frame.
    let error_frame = |msg: String| -> Option<crate::framing::Frame> {
        let err = serde_json::json!({
            "type": "brain_sync_error",
            "error": msg,
            "from": peer_key,
        });
        Some(crate::framing::Frame::new(
            crate::framing::StreamType::BrainSync,
            err.to_string().into_bytes(),
        ))
    };
    let payload = match frame.payload_str() {
        Ok(s) => s,
        Err(e) => {
            eprintln!("[SPF-BRAIN-SYNC] Invalid UTF-8 from {}: {}", tag, e);
            return error_frame("Invalid UTF-8 payload".to_string());
        }
    };
    let data: serde_json::Value = match serde_json::from_str(payload) {
        Ok(v) => v,
        Err(e) => {
            eprintln!("[SPF-BRAIN-SYNC] Invalid JSON from {}: {}", tag, e);
            return error_frame(format!("JSON parse: {}", e));
        }
    };
    let sync_type = data.get("type").and_then(|v| v.as_str()).unwrap_or("unknown");
    let signal_count = data.get("signals")
        .and_then(|v| v.as_array())
        .map(|a| a.len())
        .unwrap_or(0);
    eprintln!("[SPF-BRAIN-SYNC] Received {} from {}: {} signals",
        sync_type, tag, signal_count);
    // FL-9: Store incoming mesh signals as tlog:* entries in LMDB.
    // handle_train() reads tlog:* keys — same path for local and mesh signals.
    let mut signals_processed: usize = 0;
    if let Some(signals_array) = data.get("signals").and_then(|v| v.as_array()) {
        let db_path = crate::paths::spf_root().join("LIVE/LMDB5/LMDB5.DB");
        match crate::agent_state::AgentStateDb::open(&db_path) {
            Ok(db) => {
                for signal_json in signals_array {
                    // Malformed individual signals are skipped; the ack's
                    // signals_processed count exposes the discrepancy.
                    if let Ok(signal) = serde_json::from_value::<
                        crate::gate_training::TrainingSignal
                    >(signal_json.clone()) {
                        if let Ok(json_str) = serde_json::to_string(&signal) {
                            let tlog_key = format!("tlog:{}", signal.timestamp);
                            let _ = db.set_state(&tlog_key, &json_str);
                            signals_processed += 1;
                        }
                    }
                }
            }
            // "Zero silent drops": previously a failed DB open skipped all
            // signals without any trace — now it is logged.
            Err(e) => eprintln!("[SPF-BRAIN-SYNC] LMDB open failed, {} signals dropped: {}",
                signal_count, e),
        }
    }
    let ack = serde_json::json!({
        "type": "brain_sync_ack",
        "sync_type": sync_type,
        "signals_received": signal_count,
        "signals_processed": signals_processed,
        "training_ready": signals_processed > 0,
        "from": peer_key,
        "status": "accepted"
    });
    Some(crate::framing::Frame::new(
        crate::framing::StreamType::BrainSync,
        ack.to_string().into_bytes(),
    ))
}
// ============================================================================
// TESTS
// ============================================================================
#[cfg(test)]
mod tests {
    use super::*;
    // FL-9: LR Scheduler tests removed — LRScheduler deleted.
    // --- EWC tests ---
    #[test]
    fn test_ewc_penalty_inactive() {
        // Before the first snapshot, EWC must be a no-op: zero loss, zero grads.
        let ewc = OnlineEWC::new(100, 1000.0);
        let (loss, grads) = ewc.penalty(&vec![1.0; 100]);
        assert_eq!(loss, 0.0);
        assert!(grads.iter().all(|&g| g == 0.0));
    }
    #[test]
    fn test_ewc_penalty_active() {
        let mut ewc = OnlineEWC::new(4, 1.0);
        ewc.reference_weights = vec![1.0, 2.0, 3.0, 4.0];
        ewc.fisher = vec![1.0, 1.0, 1.0, 1.0];
        ewc.active = true;
        // Weights identical to the reference: zero penalty.
        let (loss, _) = ewc.penalty(&[1.0, 2.0, 3.0, 4.0]);
        assert_eq!(loss, 0.0);
        // Each weight drifted by +1.0: positive penalty and positive gradient.
        let (loss, grads) = ewc.penalty(&[2.0, 3.0, 4.0, 5.0]);
        assert!(loss > 0.0);
        assert!(grads[0] > 0.0);
    }
    #[test]
    fn test_ewc_fisher_update() {
        let mut ewc = OnlineEWC::new(4, 1000.0);
        ewc.update_fisher(&[0.1, 0.2, 0.3, 0.4]);
        assert_eq!(ewc.update_count, 1);
        // Fisher starts at 0, so one step of the EMA leaves
        // (1 - decay) * grad^2 = 0.1 * 0.1^2 = 0.001.
        assert!((ewc.fisher[0] - 0.1 * 0.01).abs() < 1e-6);
    }
    #[test]
    fn test_ewc_memory() {
        // 5M params x 4 bytes x 2 buffers (Fisher + reference) = 40 MB.
        let ewc = OnlineEWC::new(5_000_000, 1000.0);
        assert_eq!(ewc.memory_bytes(), 40_000_000);
    }
    // --- Experience Replay tests ---
    #[test]
    fn test_replay_basic() {
        let mut replay = ExperienceReplay::new(5);
        for i in 0..3 {
            replay.add(TrainingExample {
                input_tokens: vec![i],
                target: TrainingTarget::GateDecision(1.0),
                weight: 1.0,
            });
        }
        // Under capacity: everything lands in the regular buffer.
        assert_eq!(replay.len(), 3);
        assert_eq!(replay.regular_len(), 3);
        assert_eq!(replay.fp_locked_len(), 0);
    }
    #[test]
    fn test_replay_overflow() {
        let mut replay = ExperienceReplay::new(3);
        for i in 0..5 {
            replay.add(TrainingExample {
                input_tokens: vec![i],
                target: TrainingTarget::GateDecision(1.0),
                weight: 1.0,
            });
        }
        // Ring buffer caps at capacity; total_added still counts evictions.
        assert_eq!(replay.regular_len(), 3);
        assert_eq!(replay.total_added(), 5);
    }
    #[test]
    fn test_replay_fp_locked() {
        let mut replay = ExperienceReplay::new(3);
        // Add FP example (weight >= 4.0)
        replay.add(TrainingExample {
            input_tokens: vec![99],
            target: TrainingTarget::GateDecision(-1.0),
            weight: 4.0, // FP weight
        });
        // Fill regular buffer past capacity
        for i in 0..10 {
            replay.add(TrainingExample {
                input_tokens: vec![i],
                target: TrainingTarget::GateDecision(1.0),
                weight: 1.0,
            });
        }
        // FP still locked
        assert_eq!(replay.fp_locked_len(), 1);
        assert_eq!(replay.regular_len(), 3); // capped
        // Sample always includes FP
        let samples = replay.sample(2, 42);
        let fp_count = samples.iter().filter(|s| s.weight >= 4.0).count();
        assert!(fp_count >= 1, "FP-locked example must be in every sample");
    }
    #[test]
    fn test_replay_fp_never_evicted() {
        // Capacity applies only to the regular buffer, not the FP store.
        let mut replay = ExperienceReplay::new(2);
        // Add 3 FP examples
        for _ in 0..3 {
            replay.add(TrainingExample {
                input_tokens: vec![0],
                target: TrainingTarget::GateDecision(-1.0),
                weight: 6.0, // repeat FP weight
            });
        }
        // All 3 preserved (no capacity limit on FP-locked)
        assert_eq!(replay.fp_locked_len(), 3);
        assert_eq!(replay.regular_len(), 0);
    }
    // --- Confidence tests ---
    #[test]
    fn test_confidence_auto_allow() {
        let conf = ConfidenceConfig::default();
        assert_eq!(conf.decide(0.95), ConfidenceDecision::AutoAllow(0.95));
        // The allow threshold itself (0.8) is inclusive.
        assert_eq!(conf.decide(0.8), ConfidenceDecision::AutoAllow(0.8));
    }
    #[test]
    fn test_confidence_auto_block() {
        let conf = ConfidenceConfig::default();
        assert_eq!(conf.decide(0.1), ConfidenceDecision::AutoBlock(0.1));
        // The block threshold itself (0.2) is inclusive.
        assert_eq!(conf.decide(0.2), ConfidenceDecision::AutoBlock(0.2));
    }
    #[test]
    fn test_confidence_ask_user() {
        // Strictly between the thresholds: defer to the user.
        let conf = ConfidenceConfig::default();
        assert_eq!(conf.decide(0.5), ConfidenceDecision::AskUser(0.5));
        assert_eq!(conf.decide(0.3), ConfidenceDecision::AskUser(0.3));
        assert_eq!(conf.decide(0.79), ConfidenceDecision::AskUser(0.79));
    }
    // FL-9: LearningController tests removed — LearningController deleted.
    // --- Signal encoding tests ---
    #[test]
    fn test_signal_to_tokens() {
        let signal = TrainingSignal {
            tool: "spf_read".into(), source: "stdio".into(), allowed: true,
            status: "ok".into(), duration_ms: 0, timestamp: "t".into(),
            user_override: false, false_positive: false,
            recent_call_count: 3, preceding_tools: vec!["spf_write".into()],
            evil_score: 0.0, // Block EE
        };
        let tokens = signal_to_tokens(&signal);
        assert_eq!(tokens[0], 4); // TOOL_ID
        // Should contain BPE-encoded tool, [GATE], source, [SPF] separator, preceding tools
        assert!(tokens.contains(&5)); // GATE_ID
        assert!(tokens.contains(&7)); // SPF_ID separator
        // Should have 3 frequency tokens at the end
        let freq_count = tokens.iter().filter(|&&t| t == 6).count();
        assert_eq!(freq_count, 3);
    }
    #[test]
    fn test_signal_to_tokens_no_context() {
        let signal = TrainingSignal {
            tool: "test".into(), source: "http".into(), allowed: false,
            status: "error".into(), duration_ms: 0, timestamp: "t".into(),
            user_override: false, false_positive: false,
            recent_call_count: 0, preceding_tools: vec![],
            evil_score: 0.0, // Block EE
        };
        let tokens = signal_to_tokens(&signal);
        assert_eq!(tokens[0], 4); // TOOL_ID
        assert!(tokens.contains(&5)); // GATE_ID
        assert!(!tokens.contains(&7)); // no SPF_ID — no preceding tools
        assert!(!tokens.contains(&6)); // no frequency tokens
    }
}