incognitolm committed on
Commit 03eabfb · 1 Parent(s): bff1056
scripts/migrate-to-postgres.js CHANGED
@@ -1,573 +1,6 @@
1
- import fs from 'fs/promises';
2
- import path from 'path';
3
- import { createClient } from '@supabase/supabase-js';
4
- import { loadEncryptedJson } from '../server/cryptoUtils.js';
5
- import {
6
- POSTGRES_STORAGE_DIR,
7
- POSTGRES_STORAGE_MANIFEST,
8
- DATA_ROOT,
9
- } from '../server/dataPaths.js';
10
- import {
11
- createStandalonePostgresPool,
12
- encryptJsonPayload,
13
- makeLookupToken,
14
- makeOwnerLookup,
15
- } from '../server/postgres.js';
16
- import { POSTGRES_SCHEMA_SQL } from '../server/postgresSchema.js';
17
- import { SUPABASE_URL } from '../server/config.js';
18
 
19
- const TEMP_TTL_MS = 24 * 60 * 60 * 1000;
20
- const TEMP_INACTIVITY = 12 * 60 * 60 * 1000;
21
- const FEEDBACK_AAD = 'feedback_tickets_v1';
22
- const VERSION_FILE = path.join(DATA_ROOT, 'version.json');
23
- const WEB_SEARCH_USAGE_FILE = path.join(DATA_ROOT, 'web-search-usage.json');
24
- const MEDIA_INDEX_FILE = path.join(DATA_ROOT, 'media', 'index.json');
25
- const MEDIA_BLOBS_DIR = path.join(DATA_ROOT, 'media', 'blobs');
26
- const TEMP_STORE_FILE = path.join(DATA_ROOT, 'temp_sessions.json');
27
- const MEMORIES_FILE = path.join(DATA_ROOT, 'memories', 'index.json');
28
- const DELETED_CHATS_FILE = path.join(DATA_ROOT, 'deleted_chats', 'index.json');
29
- const SYSTEM_PROMPTS_FILE = path.join(DATA_ROOT, 'system-prompts', 'index.json');
30
- const FEEDBACK_FILE = path.join(DATA_ROOT, 'feedback_tickets.json');
31
- const GUEST_REQUEST_FILE = path.join(DATA_ROOT, 'guest_request_counts.json');
32
-
33
- function nowIso() {
34
- return new Date().toISOString();
35
- }
36
-
37
- function sessionAad(scopeType, sessionId) {
38
- return `chat-session:${scopeType}:${sessionId}`;
39
- }
40
-
41
- function guestStateLookup(tempId) {
42
- return makeLookupToken('guest-state', tempId);
43
- }
44
-
45
- function guestStateAad(tempId) {
46
- return `guest-state:${tempId}`;
47
- }
48
-
49
- function guestExpiryRecord(tempData) {
50
- const createdExpires = (tempData.created || Date.now()) + TEMP_TTL_MS;
51
- const inactiveExpires = (tempData.lastActive || Date.now()) + TEMP_INACTIVITY;
52
- return new Date(Math.min(createdExpires, inactiveExpires)).toISOString();
53
- }
54
-
55
- function shareTokenLookup(token) {
56
- return makeLookupToken('session-share-token', token);
57
- }
58
-
59
- function shareAad(recordId) {
60
- return `session-share:${recordId}`;
61
- }
62
-
63
- function promptLookup(userId) {
64
- return makeLookupToken('system-prompt', userId);
65
- }
66
-
67
- function promptAad(userId) {
68
- return `system-prompt:${userId}`;
69
- }
70
-
71
- function mediaEntryAad(id) {
72
- return `media-entry:${id}`;
73
- }
74
-
75
- function feedbackAad(id) {
76
- return `feedback:${id}`;
77
- }
78
-
79
- function usernameLookup(username) {
80
- return makeLookupToken('username', username);
81
- }
82
-
83
- function versionLookup(publicUrl) {
84
- return makeLookupToken('app-version', publicUrl);
85
- }
86
-
87
- function versionAad(publicUrl) {
88
- return `app-version:${publicUrl}`;
89
- }
90
-
91
- function requestLookup(ip) {
92
- return makeLookupToken('guest-request', ip);
93
- }
94
-
95
- function webUsageLookup(key) {
96
- return makeLookupToken('web-search-usage', key);
97
- }
98
-
99
- function webUsageAad(key, day) {
100
- return `web-search-usage:${key}:${day}`;
101
- }
102
-
103
- async function fileExists(filePath) {
104
- try {
105
- await fs.access(filePath);
106
- return true;
107
- } catch {
108
- return false;
109
- }
110
- }
111
-
112
- async function readJsonIfExists(filePath) {
113
- try {
114
- return JSON.parse(await fs.readFile(filePath, 'utf8'));
115
- } catch {
116
- return null;
117
- }
118
- }
119
-
120
- async function fetchAllSupabaseRows(client, tableName) {
121
- const pageSize = 1000;
122
- const rows = [];
123
- let from = 0;
124
- while (true) {
125
- const { data, error } = await client
126
- .from(tableName)
127
- .select('*')
128
- .range(from, from + pageSize - 1);
129
- if (error) throw error;
130
- rows.push(...(data || []));
131
- if (!data || data.length < pageSize) break;
132
- from += pageSize;
133
- }
134
- return rows;
135
- }
136
-
137
- async function truncateTargetTables(pool) {
138
- await pool.query(`
139
- TRUNCATE TABLE
140
- media_blobs,
141
- media_entries,
142
- deleted_chats,
143
- memories,
144
- system_prompts,
145
- feedback_tickets,
146
- guest_request_counters,
147
- web_search_usage,
148
- user_settings,
149
- user_profiles,
150
- device_sessions,
151
- session_shares,
152
- chat_sessions,
153
- guest_state,
154
- app_versions
155
- RESTART IDENTITY
156
- CASCADE
157
- `);
158
- }
159
-
160
- async function migrateVersions(pool, report) {
161
- const data = await readJsonIfExists(VERSION_FILE);
162
- const entries = Array.isArray(data) ? data : [];
163
- let count = 0;
164
- for (const entry of entries) {
165
- for (const [publicUrl, sha] of Object.entries(entry || {})) {
166
- await pool.query(
167
- `INSERT INTO app_versions (public_url_lookup, updated_at, payload)
168
- VALUES ($1, $2, $3::jsonb)`,
169
- [
170
- versionLookup(publicUrl),
171
- nowIso(),
172
- JSON.stringify(encryptJsonPayload({ publicUrl, sha }, versionAad(publicUrl))),
173
- ]
174
- );
175
- count += 1;
176
- }
177
- }
178
- report.migrated.versionEntries = count;
179
- }
180
-
181
- async function migrateTempSessions(pool, report) {
182
- const data = await loadEncryptedJson(TEMP_STORE_FILE);
183
- const records = data || {};
184
- let ownerCount = 0;
185
- let sessionCount = 0;
186
-
187
- for (const [tempId, tempData] of Object.entries(records)) {
188
- ownerCount += 1;
189
- const owner = { type: 'guest', id: tempId };
190
- await pool.query(
191
- `INSERT INTO guest_state (owner_lookup, expires_at, updated_at, payload)
192
- VALUES ($1, $2, $3, $4::jsonb)`,
193
- [
194
- guestStateLookup(tempId),
195
- guestExpiryRecord(tempData),
196
- nowIso(),
197
- JSON.stringify(encryptJsonPayload({
198
- tempId,
199
- msgCount: tempData.msgCount || 0,
200
- created: tempData.created || Date.now(),
201
- lastActive: tempData.lastActive || Date.now(),
202
- }, guestStateAad(tempId))),
203
- ]
204
- );
205
-
206
- for (const session of Object.values(tempData.sessions || {})) {
207
- await pool.query(
208
- `INSERT INTO chat_sessions (id, scope_type, owner_lookup, created_at, updated_at, expires_at, payload)
209
- VALUES ($1, $2, $3, $4, $5, $6, $7::jsonb)`,
210
- [
211
- session.id,
212
- 'guest',
213
- makeOwnerLookup(owner),
214
- new Date(session.created || Date.now()).toISOString(),
215
- nowIso(),
216
- guestExpiryRecord(tempData),
217
- JSON.stringify(encryptJsonPayload(session, sessionAad('guest', session.id))),
218
- ]
219
- );
220
- sessionCount += 1;
221
- }
222
- }
223
-
224
- report.migrated.tempOwners = ownerCount;
225
- report.migrated.tempSessions = sessionCount;
226
- }
227
-
228
- async function migrateMemories(pool, report) {
229
- const data = await loadEncryptedJson(MEMORIES_FILE);
230
- const memories = Object.values(data?.memories || {});
231
- for (const memory of memories) {
232
- await pool.query(
233
- `INSERT INTO memories (id, owner_lookup, created_at, updated_at, payload)
234
- VALUES ($1, $2, $3, $4, $5::jsonb)`,
235
- [
236
- memory.id,
237
- makeOwnerLookup({ type: memory.ownerType, id: memory.ownerId }),
238
- memory.createdAt,
239
- memory.updatedAt,
240
- JSON.stringify(encryptJsonPayload(memory, `memory:${memory.id}`)),
241
- ]
242
- );
243
- }
244
- report.migrated.memories = memories.length;
245
- }
246
-
247
- async function migrateDeletedChats(pool, report) {
248
- const data = await loadEncryptedJson(DELETED_CHATS_FILE);
249
- const deletedChats = Object.values(data?.deletedChats || {});
250
- for (const record of deletedChats) {
251
- await pool.query(
252
- `INSERT INTO deleted_chats (id, owner_lookup, purge_at, deleted_at, payload)
253
- VALUES ($1, $2, $3, $4, $5::jsonb)`,
254
- [
255
- record.id,
256
- makeOwnerLookup({ type: record.ownerType, id: record.ownerId }),
257
- record.purgeAt || null,
258
- record.deletedAt,
259
- JSON.stringify(encryptJsonPayload(record, `deleted-chat:${record.id}`)),
260
- ]
261
- );
262
- }
263
- report.migrated.deletedChats = deletedChats.length;
264
- }
265
-
266
- async function migrateSystemPrompts(pool, report) {
267
- const data = await loadEncryptedJson(SYSTEM_PROMPTS_FILE, 'system-prompts');
268
- const prompts = Object.entries(data?.prompts || {});
269
- for (const [userId, prompt] of prompts) {
270
- const record = {
271
- userId,
272
- markdown: prompt.markdown,
273
- updatedAt: prompt.updatedAt || nowIso(),
274
- };
275
- await pool.query(
276
- `INSERT INTO system_prompts (owner_lookup, updated_at, payload)
277
- VALUES ($1, $2, $3::jsonb)`,
278
- [
279
- promptLookup(userId),
280
- record.updatedAt,
281
- JSON.stringify(encryptJsonPayload(record, promptAad(userId))),
282
- ]
283
- );
284
- }
285
- report.migrated.systemPrompts = prompts.length;
286
- }
287
-
288
- async function migrateFeedback(pool, report) {
289
- const data = await loadEncryptedJson(FEEDBACK_FILE, FEEDBACK_AAD);
290
- const tickets = Array.isArray(data?.tickets) ? data.tickets : [];
291
- for (const ticket of tickets) {
292
- await pool.query(
293
- `INSERT INTO feedback_tickets (id, status, submitted_at, payload)
294
- VALUES ($1, $2, $3, $4::jsonb)`,
295
- [
296
- ticket.id,
297
- ticket.status || 'open',
298
- ticket.submittedAt,
299
- JSON.stringify(encryptJsonPayload(ticket, feedbackAad(ticket.id))),
300
- ]
301
- );
302
- }
303
- report.migrated.feedbackTickets = tickets.length;
304
- }
305
-
306
- async function migrateGuestRequestCounters(pool, report) {
307
- const data = await loadEncryptedJson(GUEST_REQUEST_FILE);
308
- const entries = Object.entries(data || {});
309
- for (const [ip, entry] of entries) {
310
- await pool.query(
311
- `INSERT INTO guest_request_counters (key_lookup, expires_at, updated_at, payload)
312
- VALUES ($1, $2, $3, $4::jsonb)`,
313
- [
314
- requestLookup(ip),
315
- new Date(entry.resetAt || Date.now()).toISOString(),
316
- nowIso(),
317
- JSON.stringify(encryptJsonPayload({
318
- ip,
319
- count: entry.count || 0,
320
- resetAt: entry.resetAt || Date.now(),
321
- }, 'guest-request-row')),
322
- ]
323
- );
324
- }
325
- report.migrated.guestRequestCounters = entries.length;
326
- }
327
-
328
- async function migrateWebSearchUsage(pool, report) {
329
- const data = await readJsonIfExists(WEB_SEARCH_USAGE_FILE);
330
- const days = data?.days && typeof data.days === 'object' ? data.days : {};
331
- let count = 0;
332
- for (const [dayKey, keys] of Object.entries(days)) {
333
- for (const [key, used] of Object.entries(keys || {})) {
334
- await pool.query(
335
- `INSERT INTO web_search_usage (key_lookup, day_key, updated_at, payload)
336
- VALUES ($1, $2, $3, $4::jsonb)`,
337
- [
338
- webUsageLookup(key),
339
- dayKey,
340
- nowIso(),
341
- JSON.stringify(encryptJsonPayload({ used }, webUsageAad(key, dayKey))),
342
- ]
343
- );
344
- count += 1;
345
- }
346
- }
347
- report.migrated.webSearchUsageRows = count;
348
- }
349
-
350
- async function migrateMedia(pool, report) {
351
- const data = await loadEncryptedJson(MEDIA_INDEX_FILE);
352
- const entries = Object.values(data?.entries || {});
353
- let blobCount = 0;
354
- for (const entry of entries) {
355
- await pool.query(
356
- `INSERT INTO media_entries (
357
- id, owner_lookup, parent_id, entry_type, updated_at, created_at,
358
- trashed_at, purge_at, expires_at, size_bytes, payload
359
- )
360
- VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11::jsonb)`,
361
- [
362
- entry.id,
363
- makeOwnerLookup({ type: entry.ownerType, id: entry.ownerId }),
364
- entry.parentId || null,
365
- entry.type,
366
- entry.updatedAt || entry.createdAt,
367
- entry.createdAt,
368
- entry.trashedAt || null,
369
- entry.purgeAt || null,
370
- entry.expiresAt || null,
371
- entry.size || 0,
372
- JSON.stringify(encryptJsonPayload(entry, mediaEntryAad(entry.id))),
373
- ]
374
- );
375
-
376
- if (entry.type === 'file') {
377
- const blobPath = path.join(MEDIA_BLOBS_DIR, `${entry.id}.bin`);
378
- if (await fileExists(blobPath)) {
379
- const blob = await fs.readFile(blobPath);
380
- await pool.query(
381
- `INSERT INTO media_blobs (entry_id, updated_at, payload)
382
- VALUES ($1, $2, $3)`,
383
- [entry.id, entry.updatedAt || entry.createdAt, blob]
384
- );
385
- blobCount += 1;
386
- }
387
- }
388
- }
389
- report.migrated.mediaEntries = entries.length;
390
- report.migrated.mediaBlobs = blobCount;
391
- }
392
-
393
- async function migrateSupabaseData(pool, report) {
394
- const serviceRoleKey = process.env.SUPABASE_SERVICE_ROLE_KEY || '';
395
- if (!serviceRoleKey) {
396
- if (process.env.ALLOW_PARTIAL_SQL_MIGRATION === '1') {
397
- report.pending.push(
398
- 'Supabase-backed user tables were not exported because SUPABASE_SERVICE_ROLE_KEY is not set.'
399
- );
400
- return;
401
- }
402
- throw new Error(
403
- 'SUPABASE_SERVICE_ROLE_KEY is required to migrate existing Supabase-backed sessions, shares, settings, and profiles before enabling PostgreSQL-only mode. Set ALLOW_PARTIAL_SQL_MIGRATION=1 only if you intentionally want to skip those records.'
404
- );
405
- }
406
-
407
- const supabase = createClient(SUPABASE_URL, serviceRoleKey, {
408
- auth: { persistSession: false },
409
- });
410
-
411
- const [webSessions, sharedSessions, userSettings, profiles] = await Promise.all([
412
- fetchAllSupabaseRows(supabase, 'web_sessions'),
413
- fetchAllSupabaseRows(supabase, 'shared_sessions'),
414
- fetchAllSupabaseRows(supabase, 'user_settings'),
415
- fetchAllSupabaseRows(supabase, 'profiles'),
416
- ]);
417
-
418
- for (const row of webSessions) {
419
- const session = {
420
- id: row.id,
421
- name: row.name,
422
- created: new Date(row.created_at).getTime(),
423
- history: row.history || [],
424
- model: row.model || null,
425
- };
426
- await pool.query(
427
- `INSERT INTO chat_sessions (id, scope_type, owner_lookup, created_at, updated_at, expires_at, payload)
428
- VALUES ($1, $2, $3, $4, $5, $6, $7::jsonb)`,
429
- [
430
- row.id,
431
- 'user',
432
- makeOwnerLookup({ type: 'user', id: row.user_id }),
433
- row.created_at,
434
- row.updated_at || nowIso(),
435
- null,
436
- JSON.stringify(encryptJsonPayload(session, sessionAad('user', row.id))),
437
- ]
438
- );
439
- }
440
-
441
- for (const row of sharedSessions) {
442
- const lookup = shareTokenLookup(row.token);
443
- const id = row.id || `share_${lookup.slice(0, 24)}`;
444
- await pool.query(
445
- `INSERT INTO session_shares (id, token_lookup, owner_lookup, created_at, payload)
446
- VALUES ($1, $2, $3, $4, $5::jsonb)`,
447
- [
448
- id,
449
- lookup,
450
- makeOwnerLookup({ type: 'user', id: row.owner_id }),
451
- row.created_at || nowIso(),
452
- JSON.stringify(encryptJsonPayload({
453
- id,
454
- ownerId: row.owner_id,
455
- sessionSnapshot: row.session_snapshot,
456
- createdAt: row.created_at || nowIso(),
457
- }, shareAad(id))),
458
- ]
459
- );
460
- }
461
-
462
- for (const row of userSettings) {
463
- await pool.query(
464
- `INSERT INTO user_settings (owner_lookup, updated_at, payload)
465
- VALUES ($1, $2, $3::jsonb)`,
466
- [
467
- makeOwnerLookup({ type: 'user', id: row.user_id }),
468
- row.updated_at || nowIso(),
469
- JSON.stringify(encryptJsonPayload({
470
- userId: row.user_id,
471
- settings: row.settings || {},
472
- updatedAt: row.updated_at || nowIso(),
473
- }, `user-settings:${row.user_id}`)),
474
- ]
475
- );
476
- }
477
-
478
- for (const row of profiles) {
479
- if (!row.id || !row.username) continue;
480
- await pool.query(
481
- `INSERT INTO user_profiles (owner_lookup, username_lookup, updated_at, payload)
482
- VALUES ($1, $2, $3, $4::jsonb)`,
483
- [
484
- makeOwnerLookup({ type: 'user', id: row.id }),
485
- usernameLookup(row.username),
486
- row.updated_at || nowIso(),
487
- JSON.stringify(encryptJsonPayload({
488
- userId: row.id,
489
- username: row.username,
490
- updatedAt: row.updated_at || nowIso(),
491
- }, `user-profile:${row.id}`)),
492
- ]
493
- );
494
- }
495
-
496
- report.migrated.supabaseWebSessions = webSessions.length;
497
- report.migrated.supabaseSharedSessions = sharedSessions.length;
498
- report.migrated.supabaseUserSettings = userSettings.length;
499
- report.migrated.supabaseProfiles = profiles.length;
500
- }
501
-
502
- async function writeStorageFolder(report) {
503
- const tempDir = `${POSTGRES_STORAGE_DIR}.tmp`;
504
- await fs.rm(tempDir, { recursive: true, force: true });
505
- await fs.mkdir(tempDir, { recursive: true });
506
- await fs.writeFile(
507
- path.join(tempDir, path.basename(POSTGRES_STORAGE_MANIFEST)),
508
- JSON.stringify({
509
- createdAt: report.completedAt,
510
- storageMode: 'postgres',
511
- status: report.status,
512
- pending: report.pending,
513
- }, null, 2),
514
- 'utf8'
515
- );
516
- await fs.writeFile(path.join(tempDir, 'schema.sql'), POSTGRES_SCHEMA_SQL, 'utf8');
517
- await fs.writeFile(path.join(tempDir, 'migration-report.json'), JSON.stringify(report, null, 2), 'utf8');
518
- await fs.writeFile(
519
- path.join(tempDir, 'README.txt'),
520
- [
521
- 'This folder marks PostgreSQL storage as active for the backend.',
522
- 'The server checks for this folder on startup and uses PostgreSQL-only mode when it exists.',
523
- 'schema.sql contains the schema used for the migrated encrypted SQL backend.',
524
- ].join('\n'),
525
- 'utf8'
526
- );
527
- await fs.rename(tempDir, POSTGRES_STORAGE_DIR);
528
- }
529
-
530
- async function main() {
531
- if (await fileExists(POSTGRES_STORAGE_DIR)) {
532
- throw new Error(`SQL storage folder already exists at ${POSTGRES_STORAGE_DIR}. Remove it before running the migration again.`);
533
- }
534
-
535
- await fs.mkdir(DATA_ROOT, { recursive: true });
536
-
537
- const pool = await createStandalonePostgresPool();
538
- const report = {
539
- startedAt: nowIso(),
540
- targetFolder: POSTGRES_STORAGE_DIR,
541
- migrated: {},
542
- pending: [],
543
- };
544
-
545
- try {
546
- await truncateTargetTables(pool);
547
- await migrateVersions(pool, report);
548
- await migrateTempSessions(pool, report);
549
- await migrateMemories(pool, report);
550
- await migrateDeletedChats(pool, report);
551
- await migrateSystemPrompts(pool, report);
552
- await migrateFeedback(pool, report);
553
- await migrateGuestRequestCounters(pool, report);
554
- await migrateWebSearchUsage(pool, report);
555
- await migrateMedia(pool, report);
556
- await migrateSupabaseData(pool, report);
557
-
558
- report.completedAt = nowIso();
559
- report.status = report.pending.length ? 'completed_with_pending_items' : 'completed';
560
-
561
- await writeStorageFolder(report);
562
-
563
- console.log('PostgreSQL migration complete.');
564
- console.log(JSON.stringify(report, null, 2));
565
- } finally {
566
- await pool.end();
567
- }
568
- }
569
-
570
- main().catch((err) => {
571
  console.error('PostgreSQL migration failed:', err);
572
  process.exitCode = 1;
573
  });
 
1
+ import { runPostgresMigrationCli } from '../server/postgresMigration.js';
2
 
3
+ runPostgresMigrationCli().catch((err) => {
4
  console.error('PostgreSQL migration failed:', err);
5
  process.exitCode = 1;
6
  });
server/dataPaths.js CHANGED
@@ -6,7 +6,25 @@ export const POSTGRES_STORAGE_DIR_NAME = 'postgresql-store';
6
  export const POSTGRES_STORAGE_DIR = path.join(DATA_ROOT, POSTGRES_STORAGE_DIR_NAME);
7
  export const POSTGRES_STORAGE_MANIFEST = path.join(POSTGRES_STORAGE_DIR, 'manifest.json');
8
 
9
- export const STORAGE_MODE = fs.existsSync(POSTGRES_STORAGE_DIR) ? 'postgres' : 'legacy';
10
 
11
  export function isPostgresStorageMode() {
12
  return STORAGE_MODE === 'postgres';
 
6
  export const POSTGRES_STORAGE_DIR = path.join(DATA_ROOT, POSTGRES_STORAGE_DIR_NAME);
7
  export const POSTGRES_STORAGE_MANIFEST = path.join(POSTGRES_STORAGE_DIR, 'manifest.json');
8
 
9
+ function detectStorageMode() {
10
+ return fs.existsSync(POSTGRES_STORAGE_DIR) ? 'postgres' : 'legacy';
11
+ }
12
+
13
+ export let STORAGE_MODE = detectStorageMode();
14
+
15
+ export function refreshStorageMode() {
16
+ STORAGE_MODE = detectStorageMode();
17
+ return STORAGE_MODE;
18
+ }
19
+
20
+ export function setStorageMode(mode) {
21
+ STORAGE_MODE = mode === 'postgres' ? 'postgres' : 'legacy';
22
+ return STORAGE_MODE;
23
+ }
24
+
25
+ export function getStorageMode() {
26
+ return STORAGE_MODE;
27
+ }
28
 
29
  export function isPostgresStorageMode() {
30
  return STORAGE_MODE === 'postgres';
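Note on the new `STORAGE_MODE` export above: it is now a mutable `let` binding, and under ESM exported bindings are live, so reassignments made by `refreshStorageMode()` or `setStorageMode()` are visible to every importer. A minimal usage sketch (the consumer file here is hypothetical, not part of this commit):

```js
// consumer.js — hypothetical importer of server/dataPaths.js
import { STORAGE_MODE, refreshStorageMode, isPostgresStorageMode } from './server/dataPaths.js';

console.log(STORAGE_MODE); // mode detected when dataPaths.js was first evaluated, e.g. 'legacy'

// ...later, after the postgresql-store marker folder has been created on disk...
refreshStorageMode();      // re-runs fs.existsSync() and reassigns the exported binding

// ESM exports are live bindings, so this importer sees the new value without re-importing.
console.log(STORAGE_MODE);            // 'postgres' once the marker folder exists
console.log(isPostgresStorageMode()); // true in that case
```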
server/index.js CHANGED
@@ -9,22 +9,17 @@ import fetch from 'node-fetch';
9
  import rateLimit from 'express-rate-limit';
10
  import fs from 'fs';
11
  import { registerFeedbackRoutes } from './handleFeedback.js';
12
- import { abortActiveStream, handleWsMessage } from './wsHandler.js';
13
- import { sessionStore, initStoreConfig } from './sessionStore.js';
14
  import { SUPABASE_URL, SUPABASE_ANON_KEY } from './config.js';
15
  import { safeSend } from './helpers.js';
16
- import { verifySupabaseToken } from './auth.js';
17
- import { mediaStore } from './mediaStore.js';
18
  import { initializePostgresStorage } from './postgres.js';
19
- import { POSTGRES_STORAGE_DIR, STORAGE_MODE } from './dataPaths.js';
 
20
  import { loadStoredSHA, saveStoredSHA } from './versionStore.js';
21
 
22
  export { SUPABASE_URL, SUPABASE_ANON_KEY };
23
  export { LIGHTNING_BASE, PUBLIC_URL } from './config.js';
24
 
25
  const __dirname = path.dirname(fileURLToPath(import.meta.url));
26
-
27
- initStoreConfig(SUPABASE_URL, SUPABASE_ANON_KEY);
28
  export const supabaseAnon = createClient(SUPABASE_URL, SUPABASE_ANON_KEY);
29
 
30
  const PORT = process.env.PORT || 7860;
@@ -63,10 +58,36 @@ const verifyLimiter = rateLimit({
63
 
64
  const PUBLIC_URL = process.env.PUBLIC_URL || 'default';
65
 
66
- if (STORAGE_MODE === 'postgres') {
67
- await initializePostgresStorage();
68
  }
69
- console.log(`[storage] mode=${STORAGE_MODE} target=${STORAGE_MODE === 'postgres' ? POSTGRES_STORAGE_DIR : '/data'}`);
70
 
71
  function getRequestIp(req) {
72
  return (req.headers['x-forwarded-for'] || '').split(',')[0].trim()
 
9
  import rateLimit from 'express-rate-limit';
10
  import fs from 'fs';
11
  import { registerFeedbackRoutes } from './handleFeedback.js';
 
 
12
  import { SUPABASE_URL, SUPABASE_ANON_KEY } from './config.js';
13
  import { safeSend } from './helpers.js';
 
 
14
  import { initializePostgresStorage } from './postgres.js';
15
+ import { POSTGRES_STORAGE_DIR, refreshStorageMode } from './dataPaths.js';
16
+ import { migrateLegacyDataToPostgres } from './postgresMigration.js';
17
  import { loadStoredSHA, saveStoredSHA } from './versionStore.js';
18
 
19
  export { SUPABASE_URL, SUPABASE_ANON_KEY };
20
  export { LIGHTNING_BASE, PUBLIC_URL } from './config.js';
21
 
22
  const __dirname = path.dirname(fileURLToPath(import.meta.url));
 
 
23
  export const supabaseAnon = createClient(SUPABASE_URL, SUPABASE_ANON_KEY);
24
 
25
  const PORT = process.env.PORT || 7860;
 
58
 
59
  const PUBLIC_URL = process.env.PUBLIC_URL || 'default';
60
 
61
+ async function ensureStorageBackendReady() {
62
+ let mode = refreshStorageMode();
63
+ const autoBootstrap = process.env.POSTGRES_AUTO_BOOTSTRAP !== '0';
64
+
65
+ if (mode !== 'postgres' && autoBootstrap) {
66
+ console.log(`[storage] PostgreSQL marker folder missing at ${POSTGRES_STORAGE_DIR}; starting automatic bootstrap.`);
67
+ const report = await migrateLegacyDataToPostgres({ skipIfFolderExists: true });
68
+ mode = refreshStorageMode();
69
+ if (report?.skipped) {
70
+ console.log(`[storage] PostgreSQL marker folder became available during bootstrap check.`);
71
+ } else {
72
+ console.log(`[storage] automatic PostgreSQL bootstrap ${report.status} at ${POSTGRES_STORAGE_DIR}`);
73
+ }
74
+ }
75
+
76
+ if (mode === 'postgres') {
77
+ await initializePostgresStorage();
78
+ }
79
+
80
+ console.log(`[storage] mode=${mode} target=${mode === 'postgres' ? POSTGRES_STORAGE_DIR : '/data'}`);
81
+ return mode;
82
  }
83
+
84
+ await ensureStorageBackendReady();
85
+
86
+ const { sessionStore, initStoreConfig } = await import('./sessionStore.js');
87
+ const { abortActiveStream, handleWsMessage } = await import('./wsHandler.js');
88
+ const { verifySupabaseToken } = await import('./auth.js');
89
+ const { mediaStore } = await import('./mediaStore.js');
90
+ initStoreConfig(SUPABASE_URL, SUPABASE_ANON_KEY);
91
 
92
  function getRequestIp(req) {
93
  return (req.headers['x-forwarded-for'] || '').split(',')[0].trim()
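Switching `sessionStore`, `wsHandler`, `auth`, and `mediaStore` to dynamic imports presumably keeps those modules from being evaluated before the storage backend is settled, since static `import` statements are hoisted ahead of any top-level code. A minimal sketch of the pattern, with a hypothetical `store.js` standing in for those modules:

```js
// bootstrap-order sketch (store.js is hypothetical; ensureStorageBackendReady comes from the diff above)
// A static `import { createStore } from './store.js'` would evaluate store.js before any of this
// code runs, i.e. before STORAGE_MODE could be refreshed or the PostgreSQL bootstrap could run.

await ensureStorageBackendReady();                    // decide and initialize the backend first

const { createStore } = await import('./store.js');  // evaluated only now, sees the final mode
const store = createStore();
```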
server/postgres.js CHANGED
@@ -30,7 +30,7 @@ export function buildPoolConfig() {
30
  const { PGHOST, PGPORT, PGUSER, PGPASSWORD, PGDATABASE } = process.env;
31
  if (!PGHOST || !PGUSER || !PGDATABASE) {
32
  throw new Error(
33
- `PostgreSQL storage is enabled by ${POSTGRES_STORAGE_DIR}, but no database connection is configured. Set DATABASE_URL or PGHOST/PGUSER/PGDATABASE.`
34
  );
35
  }
36
 
 
30
  const { PGHOST, PGPORT, PGUSER, PGPASSWORD, PGDATABASE } = process.env;
31
  if (!PGHOST || !PGUSER || !PGDATABASE) {
32
  throw new Error(
33
+ `PostgreSQL bootstrap/storage for ${POSTGRES_STORAGE_DIR} requires a database connection. Set DATABASE_URL or PGHOST/PGUSER/PGDATABASE.`
34
  );
35
  }
36
 
server/postgresMigration.js ADDED
@@ -0,0 +1,585 @@
1
+ import fs from 'fs/promises';
2
+ import path from 'path';
3
+ import { createClient } from '@supabase/supabase-js';
4
+ import { loadEncryptedJson } from './cryptoUtils.js';
5
+ import {
6
+ POSTGRES_STORAGE_DIR,
7
+ POSTGRES_STORAGE_MANIFEST,
8
+ DATA_ROOT,
9
+ refreshStorageMode,
10
+ } from './dataPaths.js';
11
+ import {
12
+ createStandalonePostgresPool,
13
+ encryptJsonPayload,
14
+ makeLookupToken,
15
+ makeOwnerLookup,
16
+ } from './postgres.js';
17
+ import { POSTGRES_SCHEMA_SQL } from './postgresSchema.js';
18
+ import { SUPABASE_URL } from './config.js';
19
+
20
+ const TEMP_TTL_MS = 24 * 60 * 60 * 1000;
21
+ const TEMP_INACTIVITY = 12 * 60 * 60 * 1000;
22
+ const FEEDBACK_AAD = 'feedback_tickets_v1';
23
+ const VERSION_FILE = path.join(DATA_ROOT, 'version.json');
24
+ const WEB_SEARCH_USAGE_FILE = path.join(DATA_ROOT, 'web-search-usage.json');
25
+ const MEDIA_INDEX_FILE = path.join(DATA_ROOT, 'media', 'index.json');
26
+ const MEDIA_BLOBS_DIR = path.join(DATA_ROOT, 'media', 'blobs');
27
+ const TEMP_STORE_FILE = path.join(DATA_ROOT, 'temp_sessions.json');
28
+ const MEMORIES_FILE = path.join(DATA_ROOT, 'memories', 'index.json');
29
+ const DELETED_CHATS_FILE = path.join(DATA_ROOT, 'deleted_chats', 'index.json');
30
+ const SYSTEM_PROMPTS_FILE = path.join(DATA_ROOT, 'system-prompts', 'index.json');
31
+ const FEEDBACK_FILE = path.join(DATA_ROOT, 'feedback_tickets.json');
32
+ const GUEST_REQUEST_FILE = path.join(DATA_ROOT, 'guest_request_counts.json');
33
+
34
+ function nowIso() {
35
+ return new Date().toISOString();
36
+ }
37
+
38
+ function sessionAad(scopeType, sessionId) {
39
+ return `chat-session:${scopeType}:${sessionId}`;
40
+ }
41
+
42
+ function guestStateLookup(tempId) {
43
+ return makeLookupToken('guest-state', tempId);
44
+ }
45
+
46
+ function guestStateAad(tempId) {
47
+ return `guest-state:${tempId}`;
48
+ }
49
+
50
+ function guestExpiryRecord(tempData) {
51
+ const createdExpires = (tempData.created || Date.now()) + TEMP_TTL_MS;
52
+ const inactiveExpires = (tempData.lastActive || Date.now()) + TEMP_INACTIVITY;
53
+ return new Date(Math.min(createdExpires, inactiveExpires)).toISOString();
54
+ }
55
+
56
+ function shareTokenLookup(token) {
57
+ return makeLookupToken('session-share-token', token);
58
+ }
59
+
60
+ function shareAad(recordId) {
61
+ return `session-share:${recordId}`;
62
+ }
63
+
64
+ function promptLookup(userId) {
65
+ return makeLookupToken('system-prompt', userId);
66
+ }
67
+
68
+ function promptAad(userId) {
69
+ return `system-prompt:${userId}`;
70
+ }
71
+
72
+ function mediaEntryAad(id) {
73
+ return `media-entry:${id}`;
74
+ }
75
+
76
+ function feedbackAad(id) {
77
+ return `feedback:${id}`;
78
+ }
79
+
80
+ function usernameLookup(username) {
81
+ return makeLookupToken('username', username);
82
+ }
83
+
84
+ function versionLookup(publicUrl) {
85
+ return makeLookupToken('app-version', publicUrl);
86
+ }
87
+
88
+ function versionAad(publicUrl) {
89
+ return `app-version:${publicUrl}`;
90
+ }
91
+
92
+ function requestLookup(ip) {
93
+ return makeLookupToken('guest-request', ip);
94
+ }
95
+
96
+ function webUsageLookup(key) {
97
+ return makeLookupToken('web-search-usage', key);
98
+ }
99
+
100
+ function webUsageAad(key, day) {
101
+ return `web-search-usage:${key}:${day}`;
102
+ }
103
+
104
+ async function fileExists(filePath) {
105
+ try {
106
+ await fs.access(filePath);
107
+ return true;
108
+ } catch {
109
+ return false;
110
+ }
111
+ }
112
+
113
+ async function readJsonIfExists(filePath) {
114
+ try {
115
+ return JSON.parse(await fs.readFile(filePath, 'utf8'));
116
+ } catch {
117
+ return null;
118
+ }
119
+ }
120
+
121
+ async function fetchAllSupabaseRows(client, tableName) {
122
+ const pageSize = 1000;
123
+ const rows = [];
124
+ let from = 0;
125
+ while (true) {
126
+ const { data, error } = await client
127
+ .from(tableName)
128
+ .select('*')
129
+ .range(from, from + pageSize - 1);
130
+ if (error) throw error;
131
+ rows.push(...(data || []));
132
+ if (!data || data.length < pageSize) break;
133
+ from += pageSize;
134
+ }
135
+ return rows;
136
+ }
137
+
138
+ async function truncateTargetTables(pool) {
139
+ await pool.query(`
140
+ TRUNCATE TABLE
141
+ media_blobs,
142
+ media_entries,
143
+ deleted_chats,
144
+ memories,
145
+ system_prompts,
146
+ feedback_tickets,
147
+ guest_request_counters,
148
+ web_search_usage,
149
+ user_settings,
150
+ user_profiles,
151
+ device_sessions,
152
+ session_shares,
153
+ chat_sessions,
154
+ guest_state,
155
+ app_versions
156
+ RESTART IDENTITY
157
+ CASCADE
158
+ `);
159
+ }
160
+
161
+ async function migrateVersions(pool, report) {
162
+ const data = await readJsonIfExists(VERSION_FILE);
163
+ const entries = Array.isArray(data) ? data : [];
164
+ let count = 0;
165
+ for (const entry of entries) {
166
+ for (const [publicUrl, sha] of Object.entries(entry || {})) {
167
+ await pool.query(
168
+ `INSERT INTO app_versions (public_url_lookup, updated_at, payload)
169
+ VALUES ($1, $2, $3::jsonb)`,
170
+ [
171
+ versionLookup(publicUrl),
172
+ nowIso(),
173
+ JSON.stringify(encryptJsonPayload({ publicUrl, sha }, versionAad(publicUrl))),
174
+ ]
175
+ );
176
+ count += 1;
177
+ }
178
+ }
179
+ report.migrated.versionEntries = count;
180
+ }
181
+
182
+ async function migrateTempSessions(pool, report) {
183
+ const data = await loadEncryptedJson(TEMP_STORE_FILE);
184
+ const records = data || {};
185
+ let ownerCount = 0;
186
+ let sessionCount = 0;
187
+
188
+ for (const [tempId, tempData] of Object.entries(records)) {
189
+ ownerCount += 1;
190
+ const owner = { type: 'guest', id: tempId };
191
+ await pool.query(
192
+ `INSERT INTO guest_state (owner_lookup, expires_at, updated_at, payload)
193
+ VALUES ($1, $2, $3, $4::jsonb)`,
194
+ [
195
+ guestStateLookup(tempId),
196
+ guestExpiryRecord(tempData),
197
+ nowIso(),
198
+ JSON.stringify(encryptJsonPayload({
199
+ tempId,
200
+ msgCount: tempData.msgCount || 0,
201
+ created: tempData.created || Date.now(),
202
+ lastActive: tempData.lastActive || Date.now(),
203
+ }, guestStateAad(tempId))),
204
+ ]
205
+ );
206
+
207
+ for (const session of Object.values(tempData.sessions || {})) {
208
+ await pool.query(
209
+ `INSERT INTO chat_sessions (id, scope_type, owner_lookup, created_at, updated_at, expires_at, payload)
210
+ VALUES ($1, $2, $3, $4, $5, $6, $7::jsonb)`,
211
+ [
212
+ session.id,
213
+ 'guest',
214
+ makeOwnerLookup(owner),
215
+ new Date(session.created || Date.now()).toISOString(),
216
+ nowIso(),
217
+ guestExpiryRecord(tempData),
218
+ JSON.stringify(encryptJsonPayload(session, sessionAad('guest', session.id))),
219
+ ]
220
+ );
221
+ sessionCount += 1;
222
+ }
223
+ }
224
+
225
+ report.migrated.tempOwners = ownerCount;
226
+ report.migrated.tempSessions = sessionCount;
227
+ }
228
+
229
+ async function migrateMemories(pool, report) {
230
+ const data = await loadEncryptedJson(MEMORIES_FILE);
231
+ const memories = Object.values(data?.memories || {});
232
+ for (const memory of memories) {
233
+ await pool.query(
234
+ `INSERT INTO memories (id, owner_lookup, created_at, updated_at, payload)
235
+ VALUES ($1, $2, $3, $4, $5::jsonb)`,
236
+ [
237
+ memory.id,
238
+ makeOwnerLookup({ type: memory.ownerType, id: memory.ownerId }),
239
+ memory.createdAt,
240
+ memory.updatedAt,
241
+ JSON.stringify(encryptJsonPayload(memory, `memory:${memory.id}`)),
242
+ ]
243
+ );
244
+ }
245
+ report.migrated.memories = memories.length;
246
+ }
247
+
248
+ async function migrateDeletedChats(pool, report) {
249
+ const data = await loadEncryptedJson(DELETED_CHATS_FILE);
250
+ const deletedChats = Object.values(data?.deletedChats || {});
251
+ for (const record of deletedChats) {
252
+ await pool.query(
253
+ `INSERT INTO deleted_chats (id, owner_lookup, purge_at, deleted_at, payload)
254
+ VALUES ($1, $2, $3, $4, $5::jsonb)`,
255
+ [
256
+ record.id,
257
+ makeOwnerLookup({ type: record.ownerType, id: record.ownerId }),
258
+ record.purgeAt || null,
259
+ record.deletedAt,
260
+ JSON.stringify(encryptJsonPayload(record, `deleted-chat:${record.id}`)),
261
+ ]
262
+ );
263
+ }
264
+ report.migrated.deletedChats = deletedChats.length;
265
+ }
266
+
267
+ async function migrateSystemPrompts(pool, report) {
268
+ const data = await loadEncryptedJson(SYSTEM_PROMPTS_FILE, 'system-prompts');
269
+ const prompts = Object.entries(data?.prompts || {});
270
+ for (const [userId, prompt] of prompts) {
271
+ const record = {
272
+ userId,
273
+ markdown: prompt.markdown,
274
+ updatedAt: prompt.updatedAt || nowIso(),
275
+ };
276
+ await pool.query(
277
+ `INSERT INTO system_prompts (owner_lookup, updated_at, payload)
278
+ VALUES ($1, $2, $3::jsonb)`,
279
+ [
280
+ promptLookup(userId),
281
+ record.updatedAt,
282
+ JSON.stringify(encryptJsonPayload(record, promptAad(userId))),
283
+ ]
284
+ );
285
+ }
286
+ report.migrated.systemPrompts = prompts.length;
287
+ }
288
+
289
+ async function migrateFeedback(pool, report) {
290
+ const data = await loadEncryptedJson(FEEDBACK_FILE, FEEDBACK_AAD);
291
+ const tickets = Array.isArray(data?.tickets) ? data.tickets : [];
292
+ for (const ticket of tickets) {
293
+ await pool.query(
294
+ `INSERT INTO feedback_tickets (id, status, submitted_at, payload)
295
+ VALUES ($1, $2, $3, $4::jsonb)`,
296
+ [
297
+ ticket.id,
298
+ ticket.status || 'open',
299
+ ticket.submittedAt,
300
+ JSON.stringify(encryptJsonPayload(ticket, feedbackAad(ticket.id))),
301
+ ]
302
+ );
303
+ }
304
+ report.migrated.feedbackTickets = tickets.length;
305
+ }
306
+
307
+ async function migrateGuestRequestCounters(pool, report) {
308
+ const data = await loadEncryptedJson(GUEST_REQUEST_FILE);
309
+ const entries = Object.entries(data || {});
310
+ for (const [ip, entry] of entries) {
311
+ await pool.query(
312
+ `INSERT INTO guest_request_counters (key_lookup, expires_at, updated_at, payload)
313
+ VALUES ($1, $2, $3, $4::jsonb)`,
314
+ [
315
+ requestLookup(ip),
316
+ new Date(entry.resetAt || Date.now()).toISOString(),
317
+ nowIso(),
318
+ JSON.stringify(encryptJsonPayload({
319
+ ip,
320
+ count: entry.count || 0,
321
+ resetAt: entry.resetAt || Date.now(),
322
+ }, 'guest-request-row')),
323
+ ]
324
+ );
325
+ }
326
+ report.migrated.guestRequestCounters = entries.length;
327
+ }
328
+
329
+ async function migrateWebSearchUsage(pool, report) {
330
+ const data = await readJsonIfExists(WEB_SEARCH_USAGE_FILE);
331
+ const days = data?.days && typeof data.days === 'object' ? data.days : {};
332
+ let count = 0;
333
+ for (const [dayKey, keys] of Object.entries(days)) {
334
+ for (const [key, used] of Object.entries(keys || {})) {
335
+ await pool.query(
336
+ `INSERT INTO web_search_usage (key_lookup, day_key, updated_at, payload)
337
+ VALUES ($1, $2, $3, $4::jsonb)`,
338
+ [
339
+ webUsageLookup(key),
340
+ dayKey,
341
+ nowIso(),
342
+ JSON.stringify(encryptJsonPayload({ used }, webUsageAad(key, dayKey))),
343
+ ]
344
+ );
345
+ count += 1;
346
+ }
347
+ }
348
+ report.migrated.webSearchUsageRows = count;
349
+ }
350
+
351
+ async function migrateMedia(pool, report) {
352
+ const data = await loadEncryptedJson(MEDIA_INDEX_FILE);
353
+ const entries = Object.values(data?.entries || {});
354
+ let blobCount = 0;
355
+ for (const entry of entries) {
356
+ await pool.query(
357
+ `INSERT INTO media_entries (
358
+ id, owner_lookup, parent_id, entry_type, updated_at, created_at,
359
+ trashed_at, purge_at, expires_at, size_bytes, payload
360
+ )
361
+ VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11::jsonb)`,
362
+ [
363
+ entry.id,
364
+ makeOwnerLookup({ type: entry.ownerType, id: entry.ownerId }),
365
+ entry.parentId || null,
366
+ entry.type,
367
+ entry.updatedAt || entry.createdAt,
368
+ entry.createdAt,
369
+ entry.trashedAt || null,
370
+ entry.purgeAt || null,
371
+ entry.expiresAt || null,
372
+ entry.size || 0,
373
+ JSON.stringify(encryptJsonPayload(entry, mediaEntryAad(entry.id))),
374
+ ]
375
+ );
376
+
377
+ if (entry.type === 'file') {
378
+ const blobPath = path.join(MEDIA_BLOBS_DIR, `${entry.id}.bin`);
379
+ if (await fileExists(blobPath)) {
380
+ const blob = await fs.readFile(blobPath);
381
+ await pool.query(
382
+ `INSERT INTO media_blobs (entry_id, updated_at, payload)
383
+ VALUES ($1, $2, $3)`,
384
+ [entry.id, entry.updatedAt || entry.createdAt, blob]
385
+ );
386
+ blobCount += 1;
387
+ }
388
+ }
389
+ }
390
+ report.migrated.mediaEntries = entries.length;
391
+ report.migrated.mediaBlobs = blobCount;
392
+ }
393
+
394
+ async function migrateSupabaseData(pool, report) {
395
+ const serviceRoleKey = process.env.SUPABASE_SERVICE_ROLE_KEY || '';
396
+ if (!serviceRoleKey) {
397
+ if (process.env.ALLOW_PARTIAL_SQL_MIGRATION === '1') {
398
+ report.pending.push(
399
+ 'Supabase-backed user tables were not exported because SUPABASE_SERVICE_ROLE_KEY is not set.'
400
+ );
401
+ return;
402
+ }
403
+ throw new Error(
404
+ 'SUPABASE_SERVICE_ROLE_KEY is required to migrate existing Supabase-backed sessions, shares, settings, and profiles before enabling PostgreSQL-only mode. Set ALLOW_PARTIAL_SQL_MIGRATION=1 only if you intentionally want to skip those records.'
405
+ );
406
+ }
407
+
408
+ const supabase = createClient(SUPABASE_URL, serviceRoleKey, {
409
+ auth: { persistSession: false },
410
+ });
411
+
412
+ const [webSessions, sharedSessions, userSettings, profiles] = await Promise.all([
413
+ fetchAllSupabaseRows(supabase, 'web_sessions'),
414
+ fetchAllSupabaseRows(supabase, 'shared_sessions'),
415
+ fetchAllSupabaseRows(supabase, 'user_settings'),
416
+ fetchAllSupabaseRows(supabase, 'profiles'),
417
+ ]);
418
+
419
+ for (const row of webSessions) {
420
+ const session = {
421
+ id: row.id,
422
+ name: row.name,
423
+ created: new Date(row.created_at).getTime(),
424
+ history: row.history || [],
425
+ model: row.model || null,
426
+ };
427
+ await pool.query(
428
+ `INSERT INTO chat_sessions (id, scope_type, owner_lookup, created_at, updated_at, expires_at, payload)
429
+ VALUES ($1, $2, $3, $4, $5, $6, $7::jsonb)`,
430
+ [
431
+ row.id,
432
+ 'user',
433
+ makeOwnerLookup({ type: 'user', id: row.user_id }),
434
+ row.created_at,
435
+ row.updated_at || nowIso(),
436
+ null,
437
+ JSON.stringify(encryptJsonPayload(session, sessionAad('user', row.id))),
438
+ ]
439
+ );
440
+ }
441
+
442
+ for (const row of sharedSessions) {
443
+ const lookup = shareTokenLookup(row.token);
444
+ const id = row.id || `share_${lookup.slice(0, 24)}`;
445
+ await pool.query(
446
+ `INSERT INTO session_shares (id, token_lookup, owner_lookup, created_at, payload)
447
+ VALUES ($1, $2, $3, $4, $5::jsonb)`,
448
+ [
449
+ id,
450
+ lookup,
451
+ makeOwnerLookup({ type: 'user', id: row.owner_id }),
452
+ row.created_at || nowIso(),
453
+ JSON.stringify(encryptJsonPayload({
454
+ id,
455
+ ownerId: row.owner_id,
456
+ sessionSnapshot: row.session_snapshot,
457
+ createdAt: row.created_at || nowIso(),
458
+ }, shareAad(id))),
459
+ ]
460
+ );
461
+ }
462
+
463
+ for (const row of userSettings) {
464
+ await pool.query(
465
+ `INSERT INTO user_settings (owner_lookup, updated_at, payload)
466
+ VALUES ($1, $2, $3::jsonb)`,
467
+ [
468
+ makeOwnerLookup({ type: 'user', id: row.user_id }),
469
+ row.updated_at || nowIso(),
470
+ JSON.stringify(encryptJsonPayload({
471
+ userId: row.user_id,
472
+ settings: row.settings || {},
473
+ updatedAt: row.updated_at || nowIso(),
474
+ }, `user-settings:${row.user_id}`)),
475
+ ]
476
+ );
477
+ }
478
+
479
+ for (const row of profiles) {
480
+ if (!row.id || !row.username) continue;
481
+ await pool.query(
482
+ `INSERT INTO user_profiles (owner_lookup, username_lookup, updated_at, payload)
483
+ VALUES ($1, $2, $3, $4::jsonb)`,
484
+ [
485
+ makeOwnerLookup({ type: 'user', id: row.id }),
486
+ usernameLookup(row.username),
487
+ row.updated_at || nowIso(),
488
+ JSON.stringify(encryptJsonPayload({
489
+ userId: row.id,
490
+ username: row.username,
491
+ updatedAt: row.updated_at || nowIso(),
492
+ }, `user-profile:${row.id}`)),
493
+ ]
494
+ );
495
+ }
496
+
497
+ report.migrated.supabaseWebSessions = webSessions.length;
498
+ report.migrated.supabaseSharedSessions = sharedSessions.length;
499
+ report.migrated.supabaseUserSettings = userSettings.length;
500
+ report.migrated.supabaseProfiles = profiles.length;
501
+ }
502
+
503
+ async function writeStorageFolder(report) {
504
+ const tempDir = `${POSTGRES_STORAGE_DIR}.tmp`;
505
+ await fs.rm(tempDir, { recursive: true, force: true });
506
+ await fs.mkdir(tempDir, { recursive: true });
507
+ await fs.writeFile(
508
+ path.join(tempDir, path.basename(POSTGRES_STORAGE_MANIFEST)),
509
+ JSON.stringify({
510
+ createdAt: report.completedAt,
511
+ storageMode: 'postgres',
512
+ status: report.status,
513
+ pending: report.pending,
514
+ }, null, 2),
515
+ 'utf8'
516
+ );
517
+ await fs.writeFile(path.join(tempDir, 'schema.sql'), POSTGRES_SCHEMA_SQL, 'utf8');
518
+ await fs.writeFile(path.join(tempDir, 'migration-report.json'), JSON.stringify(report, null, 2), 'utf8');
519
+ await fs.writeFile(
520
+ path.join(tempDir, 'README.txt'),
521
+ [
522
+ 'This folder marks PostgreSQL storage as active for the backend.',
523
+ 'The server checks for this folder on startup and uses PostgreSQL-only mode when it exists.',
524
+ 'schema.sql contains the schema used for the migrated encrypted SQL backend.',
525
+ ].join('\n'),
526
+ 'utf8'
527
+ );
528
+ await fs.rename(tempDir, POSTGRES_STORAGE_DIR);
529
+ }
530
+
531
+ export async function migrateLegacyDataToPostgres({ skipIfFolderExists = true } = {}) {
532
+ if (skipIfFolderExists && await fileExists(POSTGRES_STORAGE_DIR)) {
533
+ refreshStorageMode();
534
+ return {
535
+ status: 'already_active',
536
+ targetFolder: POSTGRES_STORAGE_DIR,
537
+ skipped: true,
538
+ migrated: {},
539
+ pending: [],
540
+ };
541
+ }
542
+
543
+ if (!skipIfFolderExists && await fileExists(POSTGRES_STORAGE_DIR)) {
544
+ throw new Error(`SQL storage folder already exists at ${POSTGRES_STORAGE_DIR}. Remove it before running the migration again.`);
545
+ }
546
+
547
+ await fs.mkdir(DATA_ROOT, { recursive: true });
548
+
549
+ const pool = await createStandalonePostgresPool();
550
+ const report = {
551
+ startedAt: nowIso(),
552
+ targetFolder: POSTGRES_STORAGE_DIR,
553
+ migrated: {},
554
+ pending: [],
555
+ };
556
+
557
+ try {
558
+ await truncateTargetTables(pool);
559
+ await migrateVersions(pool, report);
560
+ await migrateTempSessions(pool, report);
561
+ await migrateMemories(pool, report);
562
+ await migrateDeletedChats(pool, report);
563
+ await migrateSystemPrompts(pool, report);
564
+ await migrateFeedback(pool, report);
565
+ await migrateGuestRequestCounters(pool, report);
566
+ await migrateWebSearchUsage(pool, report);
567
+ await migrateMedia(pool, report);
568
+ await migrateSupabaseData(pool, report);
569
+
570
+ report.completedAt = nowIso();
571
+ report.status = report.pending.length ? 'completed_with_pending_items' : 'completed';
572
+
573
+ await writeStorageFolder(report);
574
+ refreshStorageMode();
575
+ return report;
576
+ } finally {
577
+ await pool.end();
578
+ }
579
+ }
580
+
581
+ export async function runPostgresMigrationCli() {
582
+ const report = await migrateLegacyDataToPostgres({ skipIfFolderExists: false });
583
+ console.log('PostgreSQL migration complete.');
584
+ console.log(JSON.stringify(report, null, 2));
585
+ }
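For reference, a sketch of calling the new module programmatically; the field names (`skipped`, `status`, `pending`) come from the report object built above, while the wrapper script itself is hypothetical and not part of this commit:

```js
// run-migration.js — usage sketch for server/postgresMigration.js
import { migrateLegacyDataToPostgres } from './server/postgresMigration.js';

const report = await migrateLegacyDataToPostgres({ skipIfFolderExists: true });

if (report.skipped) {
  // The marker folder already existed, so nothing was copied (status 'already_active').
  console.log('PostgreSQL storage already active; migration skipped.');
} else {
  // status is 'completed' or 'completed_with_pending_items'.
  console.log(`Migration ${report.status}; pending items: ${report.pending.length}`);
}
```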