import torch
import torch.nn as nn
from collections import deque

from .memory import CognitiveMemory


class CognitiveNode(nn.Module):
    """Unit neuron with safe tensor operations."""

    def __init__(self, node_id: int, input_size: int):
        super().__init__()
        self.id = node_id
        self.input_size = input_size

        # Learnable connection weights and bias for this node.
        self.weights = nn.Parameter(torch.randn(input_size) * 0.1)
        self.bias = nn.Parameter(torch.zeros(1))
        self.memory = CognitiveMemory(context_size=input_size)

        # Neurotransmitter levels; both modulate the activation in forward().
        self.dopamine = nn.Parameter(torch.tensor(0.5))
        self.serotonin = nn.Parameter(torch.tensor(0.5))
        self.recent_activations = deque(maxlen=100)

    def forward(self, inputs: torch.Tensor) -> torch.Tensor:
        # reshape(-1) rather than view(-1): it also accepts non-contiguous inputs.
        inputs = inputs.reshape(-1)

        # Blend the raw input with the retrieved memory context (70/30 mix).
        mem_context = self.memory.retrieve(inputs)
        combined = inputs * 0.7 + mem_context * 0.3

        # Linear response squashed by tanh, then scaled by the neurotransmitter
        # gain 1 + sigmoid(dopamine) - sigmoid(serotonin), which lies in (0, 2).
        activation = torch.tanh(torch.dot(combined, self.weights) + self.bias)
        modulated = activation * (1 + torch.sigmoid(self.dopamine)
                                  - torch.sigmoid(self.serotonin))

        # Record the result in memory and in the rolling activation window.
        self.memory.add_memory(inputs, modulated.item())
        self.recent_activations.append(modulated.item())

        return modulated.squeeze()

    def update_plasticity(self, reward: float):
        """Update neurotransmitter levels, clamped to [0, 1]."""
        with torch.no_grad():
            self.dopamine.data = torch.clamp(self.dopamine + reward * 0.1, 0, 1)
            self.serotonin.data = torch.clamp(self.serotonin - reward * 0.05, 0, 1)
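

# Minimal smoke test, hedged: CognitiveMemory.retrieve is assumed to return a
# tensor shaped like its input, and add_memory to accept (tensor, float); both
# signatures are inferred from the calls above, not from .memory itself. Run it
# as part of its package (e.g. `python -m <package>.node`, package name
# hypothetical), since the relative import fails under direct execution.
if __name__ == "__main__":
    node = CognitiveNode(node_id=0, input_size=8)
    out = node(torch.randn(8))          # scalar in (-2, 2): tanh times (0, 2) gain
    node.update_plasticity(reward=1.0)  # dopamine 0.5 -> 0.6, serotonin 0.5 -> 0.45
    print(f"activation={out.item():+.4f}  dopamine={node.dopamine.item():.2f}")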