ataeff committed on
Commit
dd5b02b
·
verified ·
1 Parent(s): e710124

Upload janus/janus4_temporal_diff.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. janus/janus4_temporal_diff.py +248 -0
janus/janus4_temporal_diff.py ADDED
@@ -0,0 +1,248 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ """
3
+ janus4_temporal_diff.py — Janus 4-way attention reference implementation.
4
+
5
+ 4th attention mechanism: Temporal Diff — attends to CHANGES between positions.
6
+ Based on Variant 4 (dedicated wtd+wvd) + Opus analysis fixes:
7
+ - Removed distance decay (RoPE handles this in full nanochat)
8
+ - Gate init biased against delta (-1.0) — model discovers if/when to use it
9
+ - Dedicated projections (no weight sharing with QKV or RRPRAM)
10
+
11
+ Architecture: QKV (semantic) + RRPRAM (positional) + Echo (self-resonance) + TemporalDiff (change detection)
12
+
13
+ What TemporalDiff captures that others don't:
14
+ - QKV with RoPE: encodes distance between positions, not content of change
15
+ - RRPRAM: positional patterns, not transitions
16
+ - Echo: self-similarity, not change rate
17
+ - TemporalDiff: "where did representation change, and do changes correlate?"
18
+
19
+ Pure Python, zero deps. For reference/testing. Production = C or PyTorch.
20
+
21
+ By Arianna Method, 2026-03-25.
22
+ """
23
+
24
+ import argparse
25
+ import math
26
+ import random
27
+
28
VOCAB = 256  # byte-level vocabulary: every token id is one UTF-8 byte
MAX_T = 48   # maximum sequence length (also sizes the position table and wr)
DIM = 48     # model embedding dimension
HEADS = 4    # number of attention heads
HD = DIM // HEADS  # per-head dimension (heads are contiguous DIM slices)
33
+
34
+
35
def bpe_encode(text):
    """Byte-level 'tokenizer': map text to its UTF-8 byte values (0..255)."""
    return [byte for byte in text.encode('utf-8', errors='ignore')]
37
+
38
+
39
def bpe_decode(ids):
    """Inverse of bpe_encode: ids taken mod 256, invalid UTF-8 dropped."""
    raw = bytes(i % 256 for i in ids)
    return raw.decode('utf-8', errors='ignore')
41
+
42
+
43
def rand_mat(r, c, s=0.02):
    """Build an r x c matrix (nested lists) of uniform values in (-s, s)."""
    return [[s * (random.random() * 2 - 1) for _col in range(c)]
            for _row in range(r)]
45
+
46
+
47
def vec_mat(v, m):
    """Return the row-vector/matrix product v @ m as a plain list of floats."""
    width = len(m[0])
    result = [0.0] * width
    for i, vi in enumerate(v):
        row = m[i]
        for j in range(width):
            result[j] += vi * row[j]
    return result
54
+
55
+
56
def softmax(xs):
    """Numerically stable softmax over a non-empty list of floats.

    The max is subtracted before exponentiation to avoid overflow; a tiny
    epsilon in the denominator guards against division by zero.
    """
    peak = max(xs)
    exps = [math.exp(value - peak) for value in xs]
    total = sum(exps) + 1e-9
    return [e / total for e in exps]
61
+
62
+
63
class Janus4:
    """Reference 4-way attention model: QKV + RRPRAM + Echo + TemporalDiff.

    Pure Python over lists of floats. All four attention maps are causal
    (position i only attends to j <= i) and their per-head outputs are
    blended by a softmax over `self.gate`. Only the output projection and
    bias are trained (see train_step) — the attention stack stays at its
    random initialization in this reference implementation.
    """

    def __init__(self):
        # Embedding tables: token ids (bytes) and absolute positions.
        self.tok = rand_mat(VOCAB, DIM)
        self.pos = rand_mat(MAX_T, DIM)
        # QKV — semantic attention
        self.wq = rand_mat(DIM, DIM)
        self.wk = rand_mat(DIM, DIM)
        self.wv = rand_mat(DIM, DIM)
        # RRPRAM — positional resonance
        self.wr = rand_mat(DIM, MAX_T)
        self.wvr = rand_mat(DIM, DIM)
        # Echo — self-resonance (W^T * W)
        self.wj = rand_mat(DIM, DIM)
        # Temporal Diff — dedicated projections (NOT shared with QKV/RRPRAM)
        self.wtd = rand_mat(DIM, DIM)  # delta key projection
        self.wvd = rand_mat(DIM, DIM)  # delta value projection
        # 4-way gate — delta starts suppressed (Opus recommendation)
        self.gate = [0.0, 0.0, 0.0, -1.0]  # QKV, RRPRAM, Echo, TemporalDiff
        # Output
        self.out = rand_mat(DIM, VOCAB)
        self.bias = [0.0] * VOCAB

    def _dot(self, a, b):
        # Plain dot product; zip truncates to the shorter operand.
        return sum(x * y for x, y in zip(a, b))

    def _head(self, v, h):
        # Contiguous HD-wide slice of v belonging to head h.
        return v[h * HD:(h + 1) * HD]

    def forward(self, ids):
        """Run the model over token ids (requires len(ids) <= MAX_T).

        Returns (logits, cat): per-position vocabulary logits and the
        blended attention output `cat`, which train_step reuses as the
        activation for its output-layer gradient.
        """
        T = len(ids)
        # Input representation: token embedding + absolute position embedding.
        x = [[self.tok[ids[t]][e] + self.pos[t][e] for e in range(DIM)] for t in range(T)]

        # Precompute all projections
        q = [vec_mat(x[t], self.wq) for t in range(T)]
        k = [vec_mat(x[t], self.wk) for t in range(T)]
        v = [vec_mat(x[t], self.wv) for t in range(T)]
        rv = [vec_mat(x[t], self.wvr) for t in range(T)]
        je = [vec_mat(x[t], self.wj) for t in range(T)]

        # Temporal diff: delta of input (dx[0] stays zero — no predecessor).
        dx = [[0.0] * DIM for _ in range(T)]
        for t in range(1, T):
            for e in range(DIM):
                dx[t][e] = x[t][e] - x[t - 1][e]

        # Dedicated projections for delta (not shared!)
        dk = [vec_mat(dx[t], self.wtd) for t in range(T)]  # delta keys
        dv = [vec_mat(dx[t], self.wvd) for t in range(T)]  # delta values

        # Gate weights: one softmax over the 4 mechanisms, shared by all heads.
        g = softmax(self.gate)

        cat = [[0.0] * DIM for _ in range(T)]
        for h in range(HEADS):
            # 1) QKV attention — semantic content matching
            # -1e9 masks future positions before the row softmax (causal mask).
            a1 = [[-1e9] * T for _ in range(T)]
            for i in range(T):
                qi = self._head(q[i], h)
                for j in range(i + 1):
                    a1[i][j] = self._dot(qi, self._head(k[j], h)) / math.sqrt(HD)
                a1[i] = softmax(a1[i])
            ho = [[0.0] * HD for _ in range(T)]
            for i in range(T):
                for j in range(T):
                    vv = self._head(v[j], h)
                    for d in range(HD):
                        ho[i][d] += a1[i][j] * vv[d]

            # 2) RRPRAM — positional resonance
            # NOTE(review): a2 uses the full x[i] and wr, so it does not depend
            # on h — it is recomputed identically for every head (correct but
            # redundant work; only the value slice rv differs per head).
            a2 = [[-1e9] * T for _ in range(T)]
            for i in range(T):
                for j in range(i + 1):
                    a2[i][j] = sum(x[i][e] * self.wr[e][j] for e in range(DIM)) / math.sqrt(HD)
                a2[i] = softmax(a2[i])
            ro = [[0.0] * HD for _ in range(T)]
            for i in range(T):
                for j in range(T):
                    rvh = self._head(rv[j], h)
                    for d in range(HD):
                        ro[i][d] += a2[i][j] * rvh[d]

            # 3) Echo — self-resonance (W^T * W)
            # Keys and values are the SAME projection je, so scores are a
            # similarity of x under wj with itself.
            a3 = [[-1e9] * T for _ in range(T)]
            for i in range(T):
                ei = self._head(je[i], h)
                for j in range(i + 1):
                    a3[i][j] = self._dot(ei, self._head(je[j], h)) / math.sqrt(HD)
                a3[i] = softmax(a3[i])
            jo = [[0.0] * HD for _ in range(T)]
            for i in range(T):
                for j in range(T):
                    ej = self._head(je[j], h)
                    for d in range(HD):
                        jo[i][d] += a3[i][j] * ej[d]

            # 4) Temporal Diff — change detection attention
            # No distance decay (Opus fix: RoPE handles this in full implementation)
            a4 = [[-1e9] * T for _ in range(T)]
            for i in range(T):
                dki = self._head(dk[i], h)
                for j in range(i + 1):
                    a4[i][j] = self._dot(dki, self._head(dk[j], h)) / math.sqrt(HD)
                a4[i] = softmax(a4[i])
            to = [[0.0] * HD for _ in range(T)]
            for i in range(T):
                for j in range(T):
                    dvh = self._head(dv[j], h)
                    for d in range(HD):
                        to[i][d] += a4[i][j] * dvh[d]

            # Gate blend — 4-way softmax
            for t in range(T):
                base = h * HD
                for d in range(HD):
                    cat[t][base + d] = (g[0] * ho[t][d] + g[1] * ro[t][d] +
                                        g[2] * jo[t][d] + g[3] * to[t][d])

        # Output projection: logits[t] = cat[t] @ out + bias.
        logits = [[0.0] * VOCAB for _ in range(T)]
        for t in range(T):
            for vi in range(VOCAB):
                logits[t][vi] = sum(cat[t][e] * self.out[e][vi] for e in range(DIM)) + self.bias[vi]
        return logits, cat

    def train_step(self, tok, tgt, lr):
        """One SGD step: mean cross-entropy of tgt under forward(tok).

        Updates ONLY the output projection `self.out` and `self.bias`
        (reference implementation — no backprop through the attention
        stack). Returns the mean per-position loss.
        """
        logits, cat = self.forward(tok)
        loss = 0.0
        grad = [[0.0] * VOCAB for _ in range(len(tok))]
        for t in range(len(tok)):
            p = softmax(logits[t])
            # max(1e-9, ...) guards log(0) for vanishing probabilities.
            loss -= math.log(max(1e-9, p[tgt[t]]))
            # dL/dlogit = softmax(logits) - one_hot(target)
            for vi in range(VOCAB):
                grad[t][vi] = p[vi]
            grad[t][tgt[t]] -= 1.0
        loss /= len(tok)
        # Gradient on output layer only (reference impl)
        for t in range(len(tok)):
            for e in range(DIM):
                ce = cat[t][e]
                if ce == 0.0:
                    continue  # zero activation contributes no gradient
                row = self.out[e]
                for vi in range(VOCAB):
                    row[vi] -= lr * ce * grad[t][vi] / len(tok)
            for vi in range(VOCAB):
                self.bias[vi] -= lr * grad[t][vi] / len(tok)
        return loss
208
+
209
+
210
def generate(model, prompt, n=60):
    """Greedy-decode n tokens from model, seeded with prompt.

    The context window is clipped to the last MAX_T ids before every
    forward pass; returns the decoded final window (prompt tail + output).
    """
    context = bpe_encode(prompt)[-MAX_T:]
    for _step in range(n):
        logits, _ = model.forward(context)
        probs = softmax(logits[-1])
        best = max(range(VOCAB), key=probs.__getitem__)
        context.append(best)
        context = context[-MAX_T:]
    return bpe_decode(context)
218
+
219
+
220
def train(model, text, steps, lr):
    """Run `steps` SGD steps on random MAX_T windows of text.

    Each step samples a random offset and trains next-byte prediction
    (targets are inputs shifted by one). Prints the loss every 10 steps
    and returns the full list of per-step losses.
    """
    data = bpe_encode(text)
    history = []
    max_offset = max(0, len(data) - MAX_T - 2)  # invariant; hoisted out of the loop
    for step in range(1, steps + 1):
        start = random.randint(0, max_offset)
        window = data[start:start + MAX_T]
        target = data[start + 1:start + MAX_T + 1]
        history.append(model.train_step(window, target, lr))
        if step % 10 == 0:
            print(f"step {step:4d}/{steps} loss={history[-1]:.4f}")
    return history
231
+
232
+
233
if __name__ == '__main__':
    # CLI: optionally train on a text file, then optionally generate
    # a greedy continuation of a prompt.
    parser = argparse.ArgumentParser()
    parser.add_argument('--train', type=str)
    parser.add_argument('--steps', type=int, default=40)
    parser.add_argument('--lr', type=float, default=0.05)
    parser.add_argument('--generate', type=str)
    args = parser.parse_args()

    random.seed(42)  # deterministic weights and sampling for reproducibility
    model = Janus4()
    if args.train:
        # Fix: close the file deterministically (the original leaked the
        # handle returned by open(...).read()).
        with open(args.train, 'r', encoding='utf-8', errors='ignore') as fh:
            txt = fh.read()
        losses = train(model, txt, args.steps, args.lr)
        print(f'loss_start={losses[0]:.4f} loss_end={losses[-1]:.4f}')
    if args.generate:
        print(generate(model, args.generate))