Commit b609e05 · step 5
incognitolm committed · Parent(s): a7c1d87

Files changed:
- server/index.js +14 -6
- server/postgres.js +161 -13
- server/postgresMigration.js +284 -26
server/index.js
CHANGED
@@ -11,9 +11,9 @@ import fs from 'fs';
 import { registerFeedbackRoutes } from './handleFeedback.js';
 import { SUPABASE_URL, SUPABASE_ANON_KEY } from './config.js';
 import { safeSend } from './helpers.js';
-import { initializePostgresStorage } from './postgres.js';
-import { POSTGRES_STORAGE_DIR, refreshStorageMode } from './dataPaths.js';
-import { migrateLegacyDataToPostgres } from './postgresMigration.js';
+import { getPostgresBackendType, initializePostgresStorage } from './postgres.js';
+import { POSTGRES_STORAGE_DB_DIR, POSTGRES_STORAGE_DIR, refreshStorageMode } from './dataPaths.js';
+import { materializeEmbeddedPostgresStorage, migrateLegacyDataToPostgres } from './postgresMigration.js';
 import { loadStoredSHA, saveStoredSHA } from './versionStore.js';

 export { SUPABASE_URL, SUPABASE_ANON_KEY };
@@ -63,21 +63,29 @@ async function ensureStorageBackendReady() {
   const autoBootstrap = process.env.POSTGRES_AUTO_BOOTSTRAP !== '0';

   if (mode !== 'postgres' && autoBootstrap) {
-    console.log(`[storage] PostgreSQL
+    console.log(`[storage] PostgreSQL storage folder missing at ${POSTGRES_STORAGE_DIR}; starting automatic bootstrap.`);
     const report = await migrateLegacyDataToPostgres({ skipIfFolderExists: true });
     mode = refreshStorageMode();
     if (report?.skipped) {
-      console.log(`[storage] PostgreSQL
+      console.log(`[storage] PostgreSQL storage folder became available during bootstrap check.`);
     } else {
       console.log(`[storage] automatic PostgreSQL bootstrap ${report.status} at ${POSTGRES_STORAGE_DIR}`);
     }
   }

   if (mode === 'postgres') {
+    const materialized = await materializeEmbeddedPostgresStorage();
+    if (materialized?.upgraded && materialized?.report) {
+      console.log(`[storage] materialized embedded PostgreSQL database at ${POSTGRES_STORAGE_DB_DIR}`);
+    }
     await initializePostgresStorage();
   }

-
+  const backend = mode === 'postgres' ? getPostgresBackendType() : 'legacy';
+  const target = mode === 'postgres'
+    ? (backend === 'embedded' ? POSTGRES_STORAGE_DB_DIR : POSTGRES_STORAGE_DIR)
+    : '/data';
+  console.log(`[storage] mode=${mode}${mode === 'postgres' ? ` backend=${backend}` : ''} target=${target}`);
   return mode;
 }

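A note on the new summary line (a sketch, not part of the commit): with the template literal added at the end of ensureStorageBackendReady(), the startup log should come out roughly as one of the following, with the real paths supplied by dataPaths.js:

[storage] mode=legacy target=/data
[storage] mode=postgres backend=embedded target=<POSTGRES_STORAGE_DB_DIR>
[storage] mode=postgres backend=external target=<POSTGRES_STORAGE_DIR>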
server/postgres.js
CHANGED
@@ -1,3 +1,6 @@
+import { EventEmitter } from 'events';
+import fs from 'fs/promises';
+import { PGlite } from '@electric-sql/pglite';
 import { Pool } from 'pg';
 import {
   createLookupHash,
@@ -8,11 +11,19 @@ import {
   packEncryptedBuffer,
   unpackEncryptedBuffer,
 } from './cryptoUtils.js';
-import {
+import {
+  hasEmbeddedPostgresDatabaseFiles,
+  isPostgresStorageMode,
+  POSTGRES_STORAGE_DB_DIR,
+  POSTGRES_STORAGE_DIR,
+  readPostgresStorageManifest,
+  STORAGE_MODE,
+} from './dataPaths.js';
 import { APP_SCHEMA_NAME, POSTGRES_SCHEMA_SQL } from './postgresSchema.js';

 let poolInstance = null;
 let initPromise = null;
+let activeBackendType = null;
 const configuredClients = new WeakSet();
 const patchedPools = new WeakSet();

@@ -46,7 +57,98 @@ export function getSetSearchPathSql() {
   return `SET search_path TO "${getValidatedSchemaName()}", public`;
 }

-export function
+export function hasExternalPostgresConfig() {
+  const connectionString = process.env.DATABASE_URL || process.env.POSTGRES_URL || '';
+  if (connectionString) return true;
+  const { PGHOST, PGUSER, PGDATABASE } = process.env;
+  return !!(PGHOST && PGUSER && PGDATABASE);
+}
+
+function getRequestedBackendType() {
+  const raw = String(process.env.POSTGRES_STORAGE_BACKEND || '').trim().toLowerCase();
+  return raw === 'embedded' || raw === 'external' ? raw : null;
+}
+
+function getManifestBackendType() {
+  const raw = String(readPostgresStorageManifest()?.backend || '').trim().toLowerCase();
+  return raw === 'embedded' || raw === 'external' ? raw : null;
+}
+
+function resolveBackendType(explicit = null) {
+  if (explicit === 'embedded' || explicit === 'external') return explicit;
+  const requested = getRequestedBackendType();
+  if (requested) return requested;
+  const manifestBackend = getManifestBackendType();
+  if (manifestBackend) return manifestBackend;
+  if (hasEmbeddedPostgresDatabaseFiles()) return 'embedded';
+  if (hasExternalPostgresConfig()) return 'external';
+  return 'embedded';
+}
+
+function normalizeQueryResult(result = {}) {
+  const rows = Array.isArray(result.rows) ? result.rows : [];
+  const affectedRows = Number(result.affectedRows || 0);
+  return {
+    ...result,
+    rows,
+    fields: Array.isArray(result.fields) ? result.fields : [],
+    rowCount: rows.length || affectedRows || 0,
+  };
+}
+
+class EmbeddedPostgresClient {
+  constructor(executor) {
+    this.executor = executor;
+    this.embedded = true;
+  }
+
+  async query(text, params = []) {
+    return normalizeQueryResult(await this.executor.query(text, params));
+  }
+
+  async exec(text) {
+    const results = await this.executor.exec(text);
+    return Array.isArray(results) ? results.map(normalizeQueryResult) : [];
+  }
+
+  release() {}
+}
+
+class EmbeddedPostgresPool extends EventEmitter {
+  constructor(db, dataDir) {
+    super();
+    this.db = db;
+    this.dataDir = dataDir;
+    this.embedded = true;
+  }
+
+  async connect() {
+    return new EmbeddedPostgresClient(this.db);
+  }
+
+  async query(text, params = []) {
+    return normalizeQueryResult(await this.db.query(text, params));
+  }
+
+  async exec(text) {
+    const results = await this.db.exec(text);
+    return Array.isArray(results) ? results.map(normalizeQueryResult) : [];
+  }
+
+  async transaction(fn) {
+    return this.db.transaction(async (tx) => fn(new EmbeddedPostgresClient(tx)));
+  }
+
+  async end() {
+    await this.db.close();
+  }
+}
+
+function resolveEmbeddedDataDir(dataDir = null) {
+  return dataDir || POSTGRES_STORAGE_DB_DIR;
+}
+
+function buildExternalPoolConfig() {
   const connectionString = process.env.DATABASE_URL || process.env.POSTGRES_URL || '';
   const ssl = resolveSslConfig();
   if (connectionString) {
@@ -56,7 +158,7 @@ export function buildPoolConfig() {
   const { PGHOST, PGPORT, PGUSER, PGPASSWORD, PGDATABASE } = process.env;
   if (!PGHOST || !PGUSER || !PGDATABASE) {
     throw new Error(
-      `PostgreSQL
+      `PostgreSQL storage for ${POSTGRES_STORAGE_DIR} requires either an embedded database directory or a database connection. Set POSTGRES_STORAGE_BACKEND=embedded to use the local folder, or configure DATABASE_URL / PGHOST / PGUSER / PGDATABASE for an external server.`
     );
   }

@@ -70,15 +172,31 @@ export function buildPoolConfig() {
   };
 }

+export function buildPoolConfig(options = {}) {
+  const backend = resolveBackendType(options.backend);
+  if (backend === 'embedded') {
+    return {
+      backend,
+      dataDir: resolveEmbeddedDataDir(options.dataDir),
+    };
+  }
+  return {
+    backend,
+    ...buildExternalPoolConfig(),
+  };
+}
+
 function attachPoolErrorHandler(pool) {
-  pool.on
-
-
+  if (typeof pool.on === 'function') {
+    pool.on('error', (err) => {
+      console.error('PostgreSQL pool error:', err);
+    });
+  }
   return pool;
 }

 function patchPoolClientSetup(pool) {
-  if (patchedPools.has(pool)) return pool;
+  if (pool?.embedded || patchedPools.has(pool)) return pool;

   const originalConnect = pool.connect.bind(pool);
   const originalQuery = pool.query.bind(pool);
@@ -108,14 +226,31 @@ function patchPoolClientSetup(pool) {
   return pool;
 }

-
-const
+async function createEmbeddedPostgresPool({ dataDir } = {}) {
+  const resolvedDataDir = resolveEmbeddedDataDir(dataDir);
+  await fs.mkdir(resolvedDataDir, { recursive: true });
+  const db = await PGlite.create(resolvedDataDir);
+  await db.query(getCreateSchemaSql());
+  await db.query(getSetSearchPathSql());
+  return attachPoolErrorHandler(new EmbeddedPostgresPool(db, resolvedDataDir));
+}
+
+export function getPostgresBackendType() {
+  return activeBackendType || resolveBackendType();
+}
+
+export async function createRawPostgresPool(options = {}) {
+  const config = buildPoolConfig(options);
+  if (config.backend === 'embedded') {
+    return createEmbeddedPostgresPool({ dataDir: config.dataDir });
+  }
+  const pool = new Pool(config);
   attachPoolErrorHandler(pool);
   return patchPoolClientSetup(pool);
 }

 export async function preparePostgresClient(client) {
-  if (configuredClients.has(client)) return;
+  if (client?.embedded || configuredClients.has(client)) return;
   await client.query(getCreateSchemaSql());
   await client.query(getSetSearchPathSql());
   configuredClients.add(client);
@@ -125,7 +260,11 @@ export async function applyPostgresSchema(pool) {
   const client = await pool.connect();
   try {
     await preparePostgresClient(client);
-
+    if (typeof client.exec === 'function') {
+      await client.exec(POSTGRES_SCHEMA_SQL);
+    } else {
+      await client.query(POSTGRES_SCHEMA_SQL);
+    }
   } finally {
     client.release();
   }
@@ -135,9 +274,10 @@ async function createPool() {
   return createStandalonePostgresPool();
 }

-export async function createStandalonePostgresPool() {
-  const pool = await createRawPostgresPool();
+export async function createStandalonePostgresPool(options = {}) {
+  const pool = await createRawPostgresPool(options);
   await applyPostgresSchema(pool);
+  activeBackendType = pool?.embedded ? 'embedded' : 'external';
   return pool;
 }

@@ -146,6 +286,7 @@ export async function initializePostgresStorage() {
   if (!initPromise) {
     initPromise = createPool().then((pool) => {
       poolInstance = pool;
+      activeBackendType = pool?.embedded ? 'embedded' : 'external';
       return pool;
     });
   }
@@ -178,6 +319,13 @@ export async function withPgClient(fn) {
 }

 export async function withPgTransaction(fn) {
+  const pool = await getPostgresPool();
+  if (typeof pool.transaction === 'function') {
+    return pool.transaction(async (client) => {
+      await preparePostgresClient(client);
+      return fn(client);
+    });
+  }
   return withPgClient(async (client) => {
     await client.query('BEGIN');
     try {
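A minimal usage sketch (assumptions noted in comments, not part of the diff): how the new backend-aware helpers exported above are expected to be called. Backend resolution in resolveBackendType() prefers, in order, an explicit option, the POSTGRES_STORAGE_BACKEND env var, the manifest's backend field, existing embedded database files, then external connection settings, and finally defaults to 'embedded'.

// Sketch only: exercising the backend-aware pool API from server/postgres.js.
import { initializePostgresStorage, getPostgresBackendType, withPgTransaction } from './postgres.js';

await initializePostgresStorage();        // creates either a PGlite-backed pool or a pg Pool
console.log(getPostgresBackendType());    // 'embedded' or 'external'

await withPgTransaction(async (client) => {
  // embedded pools route through PGlite's transaction(); external pools fall back to BEGIN/COMMIT
  await client.query('SELECT 1');
});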
server/postgresMigration.js
CHANGED
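For orientation before the diff below, a rough usage sketch (an assumption, not part of the commit) of the two entry points this file now provides and the result shapes visible in the code:

// Sketch only: migration entry points in server/postgresMigration.js.
import { migrateLegacyDataToPostgres, materializeEmbeddedPostgresStorage } from './postgresMigration.js';

// One-shot bootstrap from legacy files into a fresh embedded storage folder
// (returns early with a skipped report when the folder already exists).
const report = await migrateLegacyDataToPostgres({ skipIfFolderExists: true });

// Ensure an embedded PGlite database is materialized under POSTGRES_STORAGE_DB_DIR:
// resolves to { skipped, reason } when there is nothing to do, or to
// { upgraded: true, report } after copying from legacy files or from an external PostgreSQL server.
const result = await materializeEmbeddedPostgresStorage();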
@@ -1,19 +1,23 @@
 import fs from 'fs/promises';
 import path from 'path';
 import { createClient } from '@supabase/supabase-js';
-import { loadEncryptedJson } from './cryptoUtils.js';
+import { loadEncryptedJson, readEncryptedFile } from './cryptoUtils.js';
 import {
   POSTGRES_STORAGE_DIR,
+  POSTGRES_STORAGE_DB_DIR,
   POSTGRES_STORAGE_MANIFEST,
   DATA_ROOT,
+  readPostgresStorageManifest,
   refreshStorageMode,
 } from './dataPaths.js';
 import {
   applyPostgresSchema,
   createRawPostgresPool,
+  encryptBinaryPayload,
   encryptJsonPayload,
   getCreateSchemaSql,
   getDropSchemaSql,
+  hasExternalPostgresConfig,
   getPostgresSchemaName,
   getSetSearchPathSql,
   makeLookupToken,
@@ -106,6 +110,121 @@ function webUsageAad(key, day) {
   return `web-search-usage:${key}:${day}`;
 }

+function mediaBlobAad(entry) {
+  return `media:${entry.id}:${entry.ownerType}:${entry.ownerId}`;
+}
+
+function getTempStorageDir() {
+  return `${POSTGRES_STORAGE_DIR}.tmp`;
+}
+
+function getTempStorageDbDir() {
+  return path.join(getTempStorageDir(), path.basename(POSTGRES_STORAGE_DB_DIR));
+}
+
+function buildStorageManifest(report, backend = 'embedded') {
+  return {
+    formatVersion: 2,
+    createdAt: report.completedAt,
+    updatedAt: report.completedAt,
+    storageMode: 'postgres',
+    backend,
+    dbDir: path.basename(POSTGRES_STORAGE_DB_DIR),
+    status: report.status,
+    pending: report.pending,
+  };
+}
+
+const SQL_TABLE_COPY_PLANS = [
+  {
+    table: 'app_versions',
+    reportKey: 'copiedAppVersions',
+    columns: ['public_url_lookup', 'updated_at', 'payload'],
+  },
+  {
+    table: 'guest_state',
+    reportKey: 'copiedGuestStateRows',
+    columns: ['owner_lookup', 'expires_at', 'updated_at', 'payload'],
+  },
+  {
+    table: 'chat_sessions',
+    reportKey: 'copiedChatSessions',
+    columns: ['id', 'scope_type', 'owner_lookup', 'created_at', 'updated_at', 'expires_at', 'payload'],
+  },
+  {
+    table: 'session_shares',
+    reportKey: 'copiedSessionShares',
+    columns: ['id', 'token_lookup', 'owner_lookup', 'created_at', 'payload'],
+  },
+  {
+    table: 'deleted_chats',
+    reportKey: 'copiedDeletedChats',
+    columns: ['id', 'owner_lookup', 'purge_at', 'deleted_at', 'payload'],
+  },
+  {
+    table: 'memories',
+    reportKey: 'copiedMemories',
+    columns: ['id', 'owner_lookup', 'created_at', 'updated_at', 'payload'],
+  },
+  {
+    table: 'media_entries',
+    reportKey: 'copiedMediaEntries',
+    columns: [
+      'id',
+      'owner_lookup',
+      'parent_id',
+      'entry_type',
+      'updated_at',
+      'created_at',
+      'trashed_at',
+      'purge_at',
+      'expires_at',
+      'size_bytes',
+      'payload',
+    ],
+  },
+  {
+    table: 'media_blobs',
+    reportKey: 'copiedMediaBlobs',
+    columns: ['entry_id', 'updated_at', 'payload'],
+  },
+  {
+    table: 'system_prompts',
+    reportKey: 'copiedSystemPrompts',
+    columns: ['owner_lookup', 'updated_at', 'payload'],
+  },
+  {
+    table: 'feedback_tickets',
+    reportKey: 'copiedFeedbackTickets',
+    columns: ['id', 'status', 'submitted_at', 'payload'],
+  },
+  {
+    table: 'guest_request_counters',
+    reportKey: 'copiedGuestRequestCounters',
+    columns: ['key_lookup', 'expires_at', 'updated_at', 'payload'],
+  },
+  {
+    table: 'web_search_usage',
+    reportKey: 'copiedWebSearchUsageRows',
+    columns: ['key_lookup', 'day_key', 'updated_at', 'payload'],
+  },
+  {
+    table: 'user_settings',
+    reportKey: 'copiedUserSettings',
+    columns: ['owner_lookup', 'updated_at', 'payload'],
+  },
+  {
+    table: 'user_profiles',
+    reportKey: 'copiedUserProfiles',
+    columns: ['owner_lookup', 'username_lookup', 'updated_at', 'payload'],
+  },
+  {
+    table: 'device_sessions',
+    reportKey: 'copiedDeviceSessions',
+    columns: ['token_lookup', 'user_lookup', 'active', 'created_at', 'last_seen_at', 'payload'],
+  },
+];
+
 async function fileExists(filePath) {
   try {
     await fs.access(filePath);
@@ -371,11 +490,15 @@ async function migrateMedia(pool, report) {
     if (entry.type === 'file') {
       const blobPath = path.join(MEDIA_BLOBS_DIR, `${entry.id}.bin`);
       if (await fileExists(blobPath)) {
-        const blob = await
+        const blob = await readEncryptedFile(blobPath, mediaBlobAad(entry));
         await pool.query(
           `INSERT INTO media_blobs (entry_id, updated_at, payload)
            VALUES ($1, $2, $3)`,
-          [
+          [
+            entry.id,
+            entry.updatedAt || entry.createdAt,
+            encryptBinaryPayload(blob, mediaBlobAad(entry)),
+          ]
         );
         blobCount += 1;
       }
@@ -494,35 +617,93 @@ async function migrateSupabaseData(pool, report) {
   report.migrated.supabaseProfiles = profiles.length;
 }

-async function writeStorageFolder(report) {
-
-  await fs.rm(tempDir, { recursive: true, force: true });
-  await fs.mkdir(tempDir, { recursive: true });
+async function writeStorageFolder(targetDir, report, backend = 'embedded') {
+  await fs.mkdir(targetDir, { recursive: true });
   await fs.writeFile(
-    path.join(
-    JSON.stringify(
-      createdAt: report.completedAt,
-      storageMode: 'postgres',
-      status: report.status,
-      pending: report.pending,
-    }, null, 2),
+    path.join(targetDir, path.basename(POSTGRES_STORAGE_MANIFEST)),
+    JSON.stringify(buildStorageManifest(report, backend), null, 2),
     'utf8'
   );
-  await fs.writeFile(path.join(
-  await fs.writeFile(path.join(
+  await fs.writeFile(path.join(targetDir, 'schema.sql'), POSTGRES_SCHEMA_SQL, 'utf8');
+  await fs.writeFile(path.join(targetDir, 'migration-report.json'), JSON.stringify(report, null, 2), 'utf8');
   await fs.writeFile(
-    path.join(
+    path.join(targetDir, 'README.txt'),
     [
-      'This folder
-
+      'This folder contains the encrypted PostgreSQL storage used by the backend.',
+      `The actual embedded database files live in ./${path.basename(POSTGRES_STORAGE_DB_DIR)}/.`,
       'schema.sql contains the schema used for the migrated encrypted SQL backend.',
     ].join('\n'),
     'utf8'
   );
+}
+
+async function prepareTempEmbeddedTarget() {
+  const tempDir = getTempStorageDir();
+  await fs.rm(tempDir, { recursive: true, force: true });
+  await fs.mkdir(tempDir, { recursive: true });
+  return {
+    tempDir,
+    tempDbDir: getTempStorageDbDir(),
+  };
+}
+
+async function activateEmbeddedStorageFolder(tempDir) {
+  await fs.rm(POSTGRES_STORAGE_DIR, { recursive: true, force: true });
   await fs.rename(tempDir, POSTGRES_STORAGE_DIR);
+  refreshStorageMode();
+}
+
+function buildInsertValueSql(table, columns) {
+  return columns
+    .map((column, index) => (column === 'payload' && table !== 'media_blobs'
+      ? `$${index + 1}::jsonb`
+      : `$${index + 1}`))
+    .join(', ');
+}
+
+function normalizeCopiedSqlValue(table, column, value) {
+  if (column === 'payload' && table !== 'media_blobs' && value && typeof value === 'object' && !Buffer.isBuffer(value)) {
+    return JSON.stringify(value);
+  }
+  return value ?? null;
+}
+
+async function copySqlTable(sourcePool, targetPool, plan, report) {
+  const selectSql = `SELECT ${plan.columns.join(', ')} FROM ${plan.table}`;
+  const insertSql = `
+    INSERT INTO ${plan.table} (${plan.columns.join(', ')})
+    VALUES (${buildInsertValueSql(plan.table, plan.columns)})
+  `;
+  const { rows } = await sourcePool.query(selectSql);
+  for (const row of rows) {
+    await targetPool.query(
+      insertSql,
+      plan.columns.map((column) => normalizeCopiedSqlValue(plan.table, column, row[column]))
+    );
+  }
+  report.migrated[plan.reportKey] = rows.length;
+}
+
+async function copyExistingSqlData(sourcePool, targetPool, report) {
+  for (const plan of SQL_TABLE_COPY_PLANS) {
+    await copySqlTable(sourcePool, targetPool, plan, report);
+  }
+}
+
+async function hasMaterializedEmbeddedDatabase() {
+  try {
+    const entries = await fs.readdir(POSTGRES_STORAGE_DB_DIR);
+    return entries.length > 0;
+  } catch {
+    return false;
+  }
 }

-export async function migrateLegacyDataToPostgres({
+export async function migrateLegacyDataToPostgres({
+  skipIfFolderExists = true,
+  replaceExistingFolder = false,
+  sourceLabel = 'legacy_files',
+} = {}) {
   if (skipIfFolderExists && await fileExists(POSTGRES_STORAGE_DIR)) {
     refreshStorageMode();
     return {
@@ -534,17 +715,21 @@ export async function migrateLegacyDataToPostgres({ skipIfFolderExists = true }
     };
   }

-  if (!skipIfFolderExists && await fileExists(POSTGRES_STORAGE_DIR)) {
+  if (!skipIfFolderExists && !replaceExistingFolder && await fileExists(POSTGRES_STORAGE_DIR)) {
     throw new Error(`SQL storage folder already exists at ${POSTGRES_STORAGE_DIR}. Remove it before running the migration again.`);
   }

   await fs.mkdir(DATA_ROOT, { recursive: true });

-  const
+  const { tempDir, tempDbDir } = await prepareTempEmbeddedTarget();
+  const pool = await createRawPostgresPool({ backend: 'embedded', dataDir: tempDbDir });
   const report = {
     startedAt: nowIso(),
+    source: sourceLabel,
     targetFolder: POSTGRES_STORAGE_DIR,
+    targetDatabaseDir: POSTGRES_STORAGE_DB_DIR,
     targetSchema: getPostgresSchemaName(),
+    targetBackend: 'embedded',
     migrated: {},
     pending: [],
   };
@@ -565,13 +750,86 @@ export async function migrateLegacyDataToPostgres({ skipIfFolderExists = true }

     report.completedAt = nowIso();
     report.status = report.pending.length ? 'completed_with_pending_items' : 'completed';
-
-    await writeStorageFolder(report);
-    refreshStorageMode();
-    return report;
   } finally {
     await pool.end();
   }
+
+  await writeStorageFolder(tempDir, report, 'embedded');
+  await activateEmbeddedStorageFolder(tempDir);
+  return report;
+}
+
+export async function materializeEmbeddedPostgresStorage() {
+  const manifest = readPostgresStorageManifest();
+  if (!manifest) {
+    return {
+      skipped: true,
+      reason: 'manifest_missing',
+    };
+  }
+
+  if (await hasMaterializedEmbeddedDatabase()) {
+    if (manifest.backend !== 'embedded') {
+      const report = {
+        startedAt: nowIso(),
+        completedAt: nowIso(),
+        source: 'manifest_refresh',
+        targetFolder: POSTGRES_STORAGE_DIR,
+        targetDatabaseDir: POSTGRES_STORAGE_DB_DIR,
+        targetSchema: getPostgresSchemaName(),
+        targetBackend: 'embedded',
+        migrated: {},
+        pending: [],
+        status: 'completed',
+      };
+      await writeStorageFolder(POSTGRES_STORAGE_DIR, report, 'embedded');
+      refreshStorageMode();
+      return { upgraded: true, report };
+    }
+    return {
+      skipped: true,
+      reason: 'already_embedded',
+    };
+  }
+
+  if (!hasExternalPostgresConfig()) {
+    const report = await migrateLegacyDataToPostgres({
+      skipIfFolderExists: false,
+      replaceExistingFolder: true,
+      sourceLabel: 'legacy_files_upgrade',
+    });
+    return { upgraded: true, report };
+  }
+
+  const { tempDir, tempDbDir } = await prepareTempEmbeddedTarget();
+  const sourcePool = await createRawPostgresPool({ backend: 'external' });
+  const targetPool = await createRawPostgresPool({ backend: 'embedded', dataDir: tempDbDir });
+  const report = {
+    startedAt: nowIso(),
+    source: 'external_postgres',
+    previousBackend: String(manifest.backend || 'external_marker'),
+    targetFolder: POSTGRES_STORAGE_DIR,
+    targetDatabaseDir: POSTGRES_STORAGE_DB_DIR,
+    targetSchema: getPostgresSchemaName(),
+    targetBackend: 'embedded',
+    migrated: {},
+    pending: [],
+  };
+
+  try {
+    await resetBootstrapSchema(targetPool);
+    await applyPostgresSchema(targetPool);
+    await copyExistingSqlData(sourcePool, targetPool, report);
+    report.completedAt = nowIso();
+    report.status = 'completed';
+  } finally {
+    await sourcePool.end();
+    await targetPool.end();
+  }
+
+  await writeStorageFolder(tempDir, report, 'embedded');
+  await activateEmbeddedStorageFolder(tempDir);
+  return { upgraded: true, report };
| 833 |
}
|
| 834 |
|
| 835 |
export async function runPostgresMigrationCli() {
|