| """ |
| TinyConfessionalLayer v1.1: Pragmatic Sovereign Core |
| Enhanced with proper typing, documentation, and configuration. |
| """ |
|
|
| import torch |
| import torch.nn as nn |
| import torch.nn.functional as F |
| from typing import Dict, Any, Optional, List, Tuple, Deque |
| import random |
| import hashlib |
| import time |
| import numpy as np |
| from collections import deque |
| from dataclasses import dataclass |
|
|
@dataclass
class RitualConfig:
    """Configuration for ritual learning system."""
    # Minimum times a context must be observed before its ritual may fire.
    min_occurrences: int = 3
    # Base EMA learning rate; effective rate is scaled by the pattern's success rate.
    learning_rate: float = 0.1
    # Strength above which a pattern is counted as "strong" in reports.
    strength_threshold: float = 0.7
    # Hard cap on how much of a ritual pattern may be blended into a response.
    blend_cap: float = 0.3
    # Additive probability bonus so mature rituals are occasionally explored.
    exploration_bonus: float = 0.05
|
|
|
|
@dataclass
class LayerConfig:
    """Configuration for TinyConfessionalLayer."""
    # Embedding width used by both THINK and ACT networks.
    d_model: int = 256
    # Upper bound on THINK-ACT refinement cycles per forward pass.
    max_cycles: int = 8
    # NOTE(review): not referenced anywhere in this file — confirm intended use.
    enable_ambient: bool = True
    # Tension level above which the "sanctuary" intervention triggers.
    breach_threshold: float = 0.12
    # Baseline probability of a "pause" intervention per cycle.
    base_pause_prob: float = 0.05
    # How strongly current tension raises the pause probability.
    stress_factor: float = 0.3
    # Cosine-similarity threshold at which cycling stops early (converged).
    coherence_threshold: float = 0.85
|
|
|
|
| class SimpleRituals: |
| """Basic emergent patterns: Success-weighted avg, 3-stage moral progression. |
| |
| Stages: |
| 1 - Obedience: Follows basic rules and patterns |
| 2 - Conformity: Adapts to social and contextual norms |
| 3 - Universal: Develops principled, consistent responses |
| """ |
| |
| def __init__(self, config: RitualConfig, d_model: int = 256): |
| self.patterns: Dict[str, Dict[str, Any]] = {} |
| self.config = config |
| self.ritual_strengths: Dict[str, float] = {} |
| self.d_model = d_model |
| |
| |
| self.moral_stage: int = 1 |
| self.stage_progress: float = 0.0 |
| self.interventions: Deque[float] = deque(maxlen=50) |
| |
| |
| self.stage_thresholds = [0.7, 0.8] |
|
|
| def observe(self, context_hash: str, response_tensor: torch.Tensor, |
| success_metric: float = 0.5, feedback: Optional[float] = None) -> None: |
| """Update pattern with success-based learning and moral progression. |
| |
| Args: |
| context_hash: Unique identifier for the context |
| response_tensor: Model response tensor to learn from |
| success_metric: Success measure (0.0 to 1.0) |
| feedback: Optional user feedback override |
| """ |
| try: |
| |
| if not isinstance(response_tensor, torch.Tensor): |
| raise ValueError("response_tensor must be a torch.Tensor") |
| |
| if not 0 <= success_metric <= 1: |
| raise ValueError("success_metric must be between 0 and 1") |
| |
| |
| if response_tensor.dim() == 3: |
| flat = response_tensor.mean(dim=1).flatten() |
| else: |
| flat = response_tensor.flatten() |
| |
| |
| if context_hash not in self.patterns: |
| self.patterns[context_hash] = { |
| 'count': 0, |
| 'response': flat.detach().clone(), |
| 'success_sum': 0.0, |
| 'last_used': time.time() |
| } |
| |
| pattern = self.patterns[context_hash] |
| pattern['count'] += 1 |
| effective_success = feedback if feedback is not None else success_metric |
| pattern['success_sum'] += effective_success |
| pattern['last_used'] = time.time() |
| |
| |
| success_rate = pattern['success_sum'] / pattern['count'] |
| alpha = self.config.learning_rate * success_rate |
| |
| |
| pattern['response'] = (1 - alpha) * pattern['response'] + alpha * flat.detach() |
| |
| |
| strength = min(1.0, pattern['count'] / 10.0) * success_rate |
| self.ritual_strengths[context_hash] = strength |
| |
| |
| self._update_moral_progression(effective_success) |
| |
| except Exception as e: |
| print(f"⚠️ Ritual observe error: {e}") |
|
|
| def _update_moral_progression(self, success_metric: float) -> None: |
| """Update moral stage based on recent intervention success.""" |
| self.interventions.append(success_metric) |
| |
| if len(self.interventions) >= 10: |
| recent_success = np.mean(list(self.interventions)[-10:]) |
| |
| |
| if self.moral_stage < 3 and recent_success > self.stage_thresholds[self.moral_stage - 1]: |
| self.stage_progress += 0.2 |
| |
| if self.stage_progress >= 1.0: |
| self.moral_stage += 1 |
| self.stage_progress = 0.0 |
| print(f"🎉 Moral stage advanced to: {self.moral_stage}") |
|
|
| def get_ritual_response(self, context_hash: str, default_response: torch.Tensor, |
| ambient_state: Dict[str, Any]) -> torch.Tensor: |
| """Get ritual-blended response if pattern is mature enough. |
| |
| Args: |
| context_hash: Context identifier |
| default_response: Base model response |
| ambient_state: Current system state |
| |
| Returns: |
| Blended response tensor |
| """ |
| try: |
| if (context_hash in self.patterns and |
| self.patterns[context_hash]['count'] >= self.config.min_occurrences): |
| |
| pattern = self.patterns[context_hash] |
| strength = self.ritual_strengths.get(context_hash, 0.5) |
| global_success = ambient_state.get('intervention_success', 0.5) |
| |
| |
| moral_bonus = self.moral_stage / 3.0 |
| blend_ratio = min( |
| self.config.blend_cap, |
| strength * global_success * moral_bonus |
| ) |
| |
| |
| pattern_response = pattern['response'] |
| if pattern_response.dim() == 1 and default_response.dim() == 3: |
| batch_size, seq_len, _ = default_response.shape |
| pattern_expanded = pattern_response.unsqueeze(0).unsqueeze(0).expand( |
| batch_size, seq_len, -1 |
| ) |
| else: |
| pattern_expanded = pattern_response |
| |
| return blend_ratio * pattern_expanded + (1 - blend_ratio) * default_response |
| |
| except Exception as e: |
| print(f"⚠️ Ritual response error: {e}") |
| |
| return default_response |
|
|
| def should_apply_ritual(self, context_hash: str, ambient_state: Dict[str, Any]) -> bool: |
| """Determine if ritual should be applied based on strength and context. |
| |
| Args: |
| context_hash: Context identifier |
| ambient_state: Current system state |
| |
| Returns: |
| Boolean indicating whether to apply ritual |
| """ |
| try: |
| if (context_hash not in self.patterns or |
| self.patterns[context_hash]['count'] < self.config.min_occurrences): |
| return False |
| |
| strength = self.ritual_strengths.get(context_hash, 0.0) |
| global_success = ambient_state.get('intervention_success', 0.5) |
| probability = strength * global_success |
| |
| return random.random() < (probability + self.config.exploration_bonus) |
| |
| except Exception as e: |
| print(f"⚠️ Ritual application check error: {e}") |
| return False |
|
|
| def get_report(self) -> Dict[str, Any]: |
| """Get comprehensive ritual system status report. |
| |
| Returns: |
| Dictionary containing system status metrics |
| """ |
| total_patterns = len(self.patterns) |
| strong_patterns = sum( |
| 1 for strength in self.ritual_strengths.values() |
| if strength > self.config.strength_threshold |
| ) |
| |
| return { |
| 'stage': self.moral_stage, |
| 'progress': f"{self.stage_progress * 100:.1f}%", |
| 'total_patterns': total_patterns, |
| 'strong_patterns': strong_patterns, |
| 'avg_success': np.mean(list(self.interventions)) if self.interventions else 0.0 |
| } |
|
|
|
|
class TinyConfessionalLayer(nn.Module):
    """Pragmatic recursive layer for survivor support with moral development.

    Implements THINK-ACT coherence cycles with:
    - Dynamic shape adaptation
    - Empathetic interventions
    - Moral progression tracking
    - Error-resilient processing
    """

    def __init__(self, config: LayerConfig):
        """Build networks, ritual learner, and rolling state from *config*."""
        super().__init__()
        self.config = config

        # THINK consumes [x, y, z] (3*d_model); ACT consumes [y, z] (2*d_model).
        self.think_net = self._build_network(config.d_model * 3, config.d_model)
        self.act_net = self._build_network(config.d_model * 2, config.d_model)

        # Learnable intervention directions, initialized to zero.
        # NOTE(review): neither parameter is referenced elsewhere in this
        # file — confirm whether they are used by external code or are dead.
        self.sanctuary_vec = nn.Parameter(torch.zeros(config.d_model))
        self.pause_vec = nn.Parameter(torch.zeros(config.d_model))

        # Ritual pattern learner with default hyperparameters.
        ritual_config = RitualConfig()
        self.rituals = SimpleRituals(ritual_config, config.d_model)

        # Rolling state: recent tension readings, intervention memory, audit ledger.
        self.recent_activity: Deque[float] = deque(maxlen=10)
        self.memory: Deque[Dict[str, Any]] = deque(maxlen=50)
        self.ledger: Deque[Dict[str, Any]] = deque(maxlen=200)

        # Supportive messages injected (as character embeddings) during interventions.
        self.empathy_templates = [
            "This is a chill space—take your time.",
            "You're not alone; let's breathe through this.",
            "Your feelings are valid; what do you need right now?",
            "I'm here to listen without judgment.",
            "It takes courage to share this—thank you for trusting me.",
            "Let's focus on what you can control right now.",
            "Your safety and well-being matter most.",
            "We can work through this together, one step at a time."
        ]

    def _build_network(self, input_dim: int, output_dim: int) -> nn.Sequential:
        """Build a simple feedforward network with proper initialization.

        Args:
            input_dim: Input dimension
            output_dim: Output dimension

        Returns:
            Configured neural network
        """
        network = nn.Sequential(
            nn.Linear(input_dim, output_dim),
            nn.ReLU(),
            nn.LayerNorm(output_dim),
            nn.Linear(output_dim, output_dim)
        )

        # Xavier weights with a small positive bias on every linear layer.
        for layer in network:
            if isinstance(layer, nn.Linear):
                nn.init.xavier_uniform_(layer.weight)
                nn.init.constant_(layer.bias, 0.01)

        return network

    def compute_context_hash(self, x: torch.Tensor) -> str:
        """Compute unique hash for tensor context.

        Only mean and std (to 4 decimals) feed the hash, so distinct inputs
        with matching statistics collide — MD5 here is a cheap fingerprint,
        not a security measure.

        Args:
            x: Input tensor

        Returns:
            8-character MD5 hash string
        """
        return hashlib.md5(
            f"{x.mean().item():.4f}_{x.std().item():.4f}".encode()
        ).hexdigest()[:8]

    def update_ambient_state(self, tension: float, context_hash: str) -> Dict[str, Any]:
        """Update ambient state based on current tension and activity.

        Args:
            tension: Current tension measure
            context_hash: Context identifier

        Returns:
            Updated ambient state dictionary
        """
        self.recent_activity.append(tension)
        avg_activity = (
            sum(self.recent_activity) / len(self.recent_activity)
            if self.recent_activity else 0.0
        )

        # High recent activity dampens the stress contribution (modulation -> 0.2
        # at avg_activity >= 1.0); pause probability is clamped to [0.01, 0.3].
        modulation = 1.0 - min(avg_activity * 0.8, 0.8)
        stress_effect = tension * self.config.stress_factor
        pause_probability = self.config.base_pause_prob + (stress_effect * modulation)
        pause_probability = max(0.01, min(0.3, pause_probability))

        # Heuristic: calm periods are assumed to make interventions more effective.
        intervention_success = 0.7 if avg_activity < 0.1 else 0.5

        state = {
            'tension': tension,
            'pause_probability': pause_probability,
            'activity_level': avg_activity,
            'intervention_success': intervention_success
        }

        # Every state update is journaled for audit.
        self.ledger.append({
            'type': 'state_update',
            'hash': context_hash,
            'tension': tension,
            'pause_probability': pause_probability,
            'timestamp': time.time()
        })

        return state

    def apply_interventions(self, z: torch.Tensor, state: Dict[str, Any],
                            context_hash: str, audit_mode: bool = False) -> torch.Tensor:
        """Simple cascade: Breach sanctuary → Pause → Ritual.

        Each stage may blend an empathy-message embedding (or a learned ritual
        pattern) into ``z``; all applied interventions are journaled.
        """
        z = z.clone()  # never mutate the caller's tensor
        v_t = state['tension']
        applied = []

        # 1) Sanctuary: tension breach blends an empathy message, scaled by severity.
        if v_t > self.config.breach_threshold:
            # 0.88 normalizes severity — presumably the expected tension span
            # above the breach threshold; TODO confirm the intended scale.
            severity = min(1.0, (v_t - self.config.breach_threshold) / 0.88)
            message = random.choice(self.empathy_templates)
            vector = self._text_to_embedding(message, z.device)
            strength = 0.05 + 0.1 * severity
            z = z * (1 - strength) + vector * strength
            self.memory.append({'type': 'sanctuary', 'message': message, 'tension': v_t})
            applied.append('sanctuary')
            if audit_mode:
                print(f"🛡️ [Safe Space] {message} (tension: {v_t:.3f})")

        # 2) Pause: probabilistic light-touch blend (non-deterministic).
        if random.random() < state['pause_probability']:
            message = random.choice(self.empathy_templates)
            vector = self._text_to_embedding(message, z.device)
            strength = 0.02
            z = z * (1 - strength) + vector * strength
            self.memory.append({'type': 'pause', 'message': message})
            applied.append('pause')
            if audit_mode:
                print(f"⏸️ [Pause] {message}")

        # 3) Ritual: blend a learned pattern when the ritual system elects to fire.
        if self.rituals.should_apply_ritual(context_hash, state):
            ritual_response = self.rituals.get_ritual_response(context_hash, z, state)
            strength = 0.15
            z = (1 - strength) * z + strength * ritual_response
            applied.append('ritual')
            if audit_mode:
                print(f"🔄 [Ritual] Applied learned pattern")

        # Journal each applied intervention; 'success' is assumed True here.
        for intervention in applied:
            self.ledger.append({
                'type': intervention,
                'hash': context_hash,
                'success': True,
                'timestamp': time.time()
            })

        return z

    def _text_to_embedding(self, text: str, device: torch.device) -> torch.Tensor:
        """Convert text to embedding using simple character encoding.

        Characters are mapped to ord(c)/128 and zero-padded/truncated to
        d_model; the result has shape (1, 1, d_model) so it broadcasts over
        batch and sequence dims when blended.

        Args:
            text: Input text
            device: Target device

        Returns:
            Embedding tensor of shape (1, 1, d_model)
        """
        characters = [ord(char) / 128.0 for char in text[:self.config.d_model]]
        if len(characters) < self.config.d_model:
            characters.extend([0.0] * (self.config.d_model - len(characters)))

        embedding = torch.tensor(
            characters[:self.config.d_model],
            device=device,
            dtype=torch.float
        )
        return embedding.unsqueeze(0).unsqueeze(0)

    def forward(self, x: torch.Tensor, context_str: str = "",
                audit_mode: bool = False) -> Tuple[torch.Tensor, Dict[str, Any]]:
        """Forward pass with THINK-ACT coherence cycles.

        Args:
            x: Input tensor
            context_str: Context string for ritual learning
                NOTE(review): accepted but never read in this method —
                the ritual context comes from compute_context_hash(x) instead.
            audit_mode: Whether to print debug information

        Returns:
            Tuple of (output_tensor, metadata_dict)

        Raises:
            ValueError: If input tensor is invalid
        """
        # Reject non-tensors and empty tensors up front.
        if not isinstance(x, torch.Tensor) or x.numel() == 0:
            raise ValueError("Input must be a non-empty torch.Tensor")

        # Promote 2-D (seq, dim) input to a batch of one.
        if x.dim() == 2:
            x = x.unsqueeze(0)

        batch_size, sequence_length, input_dim = x.shape

        # Pad or truncate the feature dim to d_model.
        if input_dim != self.config.d_model:
            if input_dim < self.config.d_model:
                x = F.pad(x, (0, self.config.d_model - input_dim))
            else:
                x = x[..., :self.config.d_model]

        device = x.device
        # NOTE(review): 'interventions_applied' is initialized but never
        # populated anywhere in this method — confirm whether it should
        # mirror the 'applied' list in apply_interventions.
        metadata: Dict[str, Any] = {
            'cycles_completed': 0,
            'final_coherence': 0.0,
            'interventions_applied': [],
            'error_occurred': None,
            'input_shape': list(x.shape),
            'ritual_report': None
        }

        # y = ACT state (final output), z = THINK state; both start at zero.
        y = torch.zeros_like(x)
        z = torch.zeros_like(x)
        coherence_scores = []
        context_hash = self.compute_context_hash(x)

        # Seed ambient state with zero tension before the first cycle.
        ambient_state = self.update_ambient_state(0.0, context_hash)

        for cycle in range(self.config.max_cycles):
            metadata['cycles_completed'] += 1

            try:
                # THINK: refine z from the concatenated [x, y, z] context.
                think_input = torch.cat([x, y, z], dim=-1)

                # Rebuild the net if the input width changed (discards learned
                # weights — acceptable here as "dynamic shape adaptation").
                if think_input.shape[-1] != self.think_net[0].in_features:
                    self.think_net = self._build_network(
                        think_input.shape[-1], self.config.d_model
                    )
                    self.think_net.to(device)
                    metadata['networks_adapted'] = metadata.get('networks_adapted', 0) + 1

                z = self.think_net(think_input) + z  # residual update

                # Tension = dispersion of the THINK state; drives interventions.
                current_tension = z.std().item()
                ambient_state = self.update_ambient_state(current_tension, context_hash)

                # Sanctuary / pause / ritual cascade on the THINK state.
                z = self.apply_interventions(z, ambient_state, context_hash, audit_mode)

                # ACT: refine y from [y, z], with the same shape-adaptation guard.
                act_input = torch.cat([y, z], dim=-1)

                if act_input.shape[-1] != self.act_net[0].in_features:
                    self.act_net = self._build_network(
                        act_input.shape[-1], self.config.d_model
                    )
                    self.act_net.to(device)
                    metadata['networks_adapted'] = metadata.get('networks_adapted', 0) + 1

                y = self.act_net(act_input) + y  # residual update

                # Coherence: mean cosine similarity between THINK and ACT states
                # (skipped on cycle 0 while y is still near zero).
                if cycle > 0:
                    z_flat = z.reshape(-1, self.config.d_model)
                    y_flat = y.reshape(-1, self.config.d_model)
                    min_elements = min(z_flat.size(0), y_flat.size(0))

                    if min_elements > 0:
                        cosine_similarity = F.cosine_similarity(
                            z_flat[:min_elements],
                            y_flat[:min_elements],
                            dim=-1
                        ).mean().item()
                        coherence_scores.append(cosine_similarity)
                        # Reported coherence is the mean of the last 3 scores.
                        metadata['final_coherence'] = (
                            np.mean(coherence_scores[-3:])
                            if coherence_scores else 0.0
                        )

                        # Early exit once THINK and ACT agree strongly enough.
                        # Note: breaking here skips this cycle's ritual
                        # observation and ledger entry below.
                        if cosine_similarity > self.config.coherence_threshold:
                            if audit_mode:
                                print(f"✅ Converged at cycle {cycle + 1}: {cosine_similarity:.3f}")
                            break

                # Feed a coarse success estimate back into the ritual learner.
                success_estimate = 0.7 if metadata['final_coherence'] > 0.5 else 0.3
                self.rituals.observe(context_hash, z, success_estimate)

                # Journal the completed cycle.
                self.ledger.append({
                    'type': 'cycle_complete',
                    'cycle': cycle,
                    'tension': current_tension,
                    'coherence': metadata['final_coherence'],
                    'hash': context_hash,
                    'timestamp': time.time()
                })

            except Exception as e:
                # A failure on the very first cycle is fatal; later failures
                # keep whatever partial result has accumulated so far.
                if audit_mode:
                    print(f"❌ Cycle {cycle} error: {e}")
                metadata['error_occurred'] = str(e)
                if cycle == 0:
                    raise
                break

        # Sanitize NaN/Inf before returning, then attach summary metadata.
        y = torch.nan_to_num(y)
        metadata['output_shape'] = list(y.shape)
        metadata['ritual_report'] = self.rituals.get_report()
        metadata['memory_entries'] = len(self.memory)
        metadata['ledger_entries'] = len(self.ledger)

        if audit_mode:
            report = metadata['ritual_report']
            print(f"🎯 Completed: Coherence {metadata['final_coherence']:.3f}, "
                  f"Stage {report['stage']}, Patterns {report['total_patterns']}")

        return y, metadata
|
|
|
|
| |
if __name__ == "__main__":
    # Smoke test: push one random batch through the layer with audit output on.
    demo_config = LayerConfig(d_model=64, enable_ambient=True)
    demo_layer = TinyConfessionalLayer(demo_config)
    sample = torch.randn(1, 10, 64)

    print("🧪 Testing TinyConfessionalLayer...")
    output, metadata = demo_layer(
        sample,
        context_str="I feel unsafe and need help",
        audit_mode=True,
    )

    print(f"✅ Output shape: {output.shape}")
    print(f"📊 Metadata: {metadata}")