Spaces:
Running
Running
Commit ·
b4d7c1c
0
Parent(s):
initialize project
Browse files- .env.example +88 -0
- .gitattributes +35 -0
- Dockerfile +133 -0
- LICENSE +34 -0
- README.md +259 -0
- cloudflare-proxy-setup.py +253 -0
- cloudflare-proxy.js +375 -0
- cloudflare-worker.js +103 -0
- docker-compose.yml +54 -0
- health-server.js +833 -0
- postiz-sync.py +382 -0
- setup-uptimerobot.sh +84 -0
- start.sh +244 -0
.env.example
ADDED
|
@@ -0,0 +1,88 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# ============================================================================
|
| 2 |
+
# HuggingPost — Postiz on Hugging Face Spaces
|
| 3 |
+
# Configuration reference (.env.example)
|
| 4 |
+
#
|
| 5 |
+
# These vars map to HF Space "Variables and secrets". Paste them at:
|
| 6 |
+
# https://huggingface.co/spaces/<user>/<your-space>/settings → Secrets
|
| 7 |
+
# ============================================================================
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
# ── Required for persistence ────────────────────────────────────────────────
|
| 11 |
+
# HF token with WRITE access (https://huggingface.co/settings/tokens).
|
| 12 |
+
# Without it, all data is lost on Space restart.
|
| 13 |
+
HF_TOKEN=hf_xxxxxxxxxxxxxxxxxxxxxxxxxxxx
|
| 14 |
+
|
| 15 |
+
# Optional. Auto-detected from HF_TOKEN if unset.
|
| 16 |
+
# HF_USERNAME=your-username
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
# ── Backup tuning ───────────────────────────────────────────────────────────
|
| 20 |
+
# Backup interval in seconds. Default 300 (5 min).
|
| 21 |
+
SYNC_INTERVAL=300
|
| 22 |
+
|
| 23 |
+
# Skip backup if tarball exceeds this size in bytes. Default 100 MB.
|
| 24 |
+
SYNC_MAX_FILE_BYTES=104857600
|
| 25 |
+
|
| 26 |
+
# Dataset name. Created at <HF_USERNAME>/<BACKUP_DATASET_NAME>, private.
|
| 27 |
+
BACKUP_DATASET_NAME=huggingpost-backup
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
# ── Postiz core (auto-derived in start.sh — override only if needed) ────────
|
| 31 |
+
# DATABASE_URL=postgresql://postiz:<auto-generated>@localhost:5432/postiz
|
| 32 |
+
# REDIS_URL=redis://localhost:6379
|
| 33 |
+
# JWT_SECRET=<auto-generated and persisted to backup>
|
| 34 |
+
|
| 35 |
+
# These are derived from SPACE_HOST automatically. Override only for local dev.
|
| 36 |
+
# FRONTEND_URL=http://localhost:7860
|
| 37 |
+
# NEXT_PUBLIC_BACKEND_URL=http://localhost:7860/api
|
| 38 |
+
# BACKEND_INTERNAL_URL=http://localhost:3000
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
# ── Media storage ───────────────────────────────────────────────────────────
|
| 42 |
+
# "local" = files saved in /postiz/uploads (included in HF Dataset backup).
|
| 43 |
+
# "cloudflare" = R2 — set the CLOUDFLARE_* vars below.
|
| 44 |
+
STORAGE_PROVIDER=local
|
| 45 |
+
# UPLOAD_DIRECTORY=/postiz/uploads
|
| 46 |
+
# NEXT_PUBLIC_UPLOAD_STATIC_DIRECTORY=/uploads
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
# ── Cloudflare R2 (only if STORAGE_PROVIDER=cloudflare) ─────────────────────
|
| 50 |
+
# Get from your Cloudflare dashboard → R2 → API Tokens.
|
| 51 |
+
# CLOUDFLARE_ACCOUNT_ID=
|
| 52 |
+
# CLOUDFLARE_ACCESS_KEY=
|
| 53 |
+
# CLOUDFLARE_SECRET_ACCESS_KEY=
|
| 54 |
+
# CLOUDFLARE_BUCKETNAME=
|
| 55 |
+
# CLOUDFLARE_BUCKET_URL=https://<bucket>.r2.cloudflarestorage.com/
|
| 56 |
+
# CLOUDFLARE_REGION=auto
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
# ── Cloudflare proxy (Optional, fixes blocked outbound) ─────────────────────
|
| 60 |
+
# CLOUDFLARE_WORKERS_TOKEN=
|
| 61 |
+
# CLOUDFLARE_PROXY_DOMAINS=* # or comma-separated list
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
# ── Email (Optional — controls signup activation flow) ──────────────────────
|
| 65 |
+
# Without RESEND_API_KEY, new signups are auto-activated.
|
| 66 |
+
# RESEND_API_KEY=
|
| 67 |
+
# EMAIL_FROM_ADDRESS=
|
| 68 |
+
# EMAIL_FROM_NAME=
|
| 69 |
+
|
| 70 |
+
# Set to true after creating your admin account to lock down the instance.
|
| 71 |
+
# DISABLE_REGISTRATION=false
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
# ── Misc ────────────────────────────────────────────────────────────────────
|
| 75 |
+
# OPENAI_API_KEY= # enables AI assistant inside Postiz
|
| 76 |
+
# API_LIMIT=30
|
| 77 |
+
|
| 78 |
+
|
| 79 |
+
# ── Developer settings (don't change unless you know why) ───────────────────
|
| 80 |
+
NX_ADD_PLUGINS=false
|
| 81 |
+
IS_GENERAL=true
|
| 82 |
+
NODE_ENV=production
|
| 83 |
+
|
| 84 |
+
|
| 85 |
+
# ── HF Spaces auto-injects these — DO NOT set manually ──────────────────────
|
| 86 |
+
# SPACE_HOST=<your-space>.hf.space
|
| 87 |
+
# SPACE_ID=<user>/<name>
|
| 88 |
+
# SPACE_AUTHOR_NAME=<user>
|
.gitattributes
ADDED
|
@@ -0,0 +1,35 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
*.7z filter=lfs diff=lfs merge=lfs -text
|
| 2 |
+
*.arrow filter=lfs diff=lfs merge=lfs -text
|
| 3 |
+
*.bin filter=lfs diff=lfs merge=lfs -text
|
| 4 |
+
*.bz2 filter=lfs diff=lfs merge=lfs -text
|
| 5 |
+
*.ckpt filter=lfs diff=lfs merge=lfs -text
|
| 6 |
+
*.ftz filter=lfs diff=lfs merge=lfs -text
|
| 7 |
+
*.gz filter=lfs diff=lfs merge=lfs -text
|
| 8 |
+
*.h5 filter=lfs diff=lfs merge=lfs -text
|
| 9 |
+
*.joblib filter=lfs diff=lfs merge=lfs -text
|
| 10 |
+
*.lfs.* filter=lfs diff=lfs merge=lfs -text
|
| 11 |
+
*.mlmodel filter=lfs diff=lfs merge=lfs -text
|
| 12 |
+
*.model filter=lfs diff=lfs merge=lfs -text
|
| 13 |
+
*.msgpack filter=lfs diff=lfs merge=lfs -text
|
| 14 |
+
*.npy filter=lfs diff=lfs merge=lfs -text
|
| 15 |
+
*.npz filter=lfs diff=lfs merge=lfs -text
|
| 16 |
+
*.onnx filter=lfs diff=lfs merge=lfs -text
|
| 17 |
+
*.ot filter=lfs diff=lfs merge=lfs -text
|
| 18 |
+
*.parquet filter=lfs diff=lfs merge=lfs -text
|
| 19 |
+
*.pb filter=lfs diff=lfs merge=lfs -text
|
| 20 |
+
*.pickle filter=lfs diff=lfs merge=lfs -text
|
| 21 |
+
*.pkl filter=lfs diff=lfs merge=lfs -text
|
| 22 |
+
*.pt filter=lfs diff=lfs merge=lfs -text
|
| 23 |
+
*.pth filter=lfs diff=lfs merge=lfs -text
|
| 24 |
+
*.rar filter=lfs diff=lfs merge=lfs -text
|
| 25 |
+
*.safetensors filter=lfs diff=lfs merge=lfs -text
|
| 26 |
+
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
| 27 |
+
*.tar.* filter=lfs diff=lfs merge=lfs -text
|
| 28 |
+
*.tar filter=lfs diff=lfs merge=lfs -text
|
| 29 |
+
*.tflite filter=lfs diff=lfs merge=lfs -text
|
| 30 |
+
*.tgz filter=lfs diff=lfs merge=lfs -text
|
| 31 |
+
*.wasm filter=lfs diff=lfs merge=lfs -text
|
| 32 |
+
*.xz filter=lfs diff=lfs merge=lfs -text
|
| 33 |
+
*.zip filter=lfs diff=lfs merge=lfs -text
|
| 34 |
+
*.zst filter=lfs diff=lfs merge=lfs -text
|
| 35 |
+
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
Dockerfile
ADDED
|
@@ -0,0 +1,133 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# ============================================================================
|
| 2 |
+
# HuggingPost — Postiz v2.11.3 on Hugging Face Spaces
|
| 3 |
+
#
|
| 4 |
+
# Builds Postiz from source with a Next.js basePath="/app" patch so the
|
| 5 |
+
# Postiz UI mounts at /app/* and our HuggingPost dashboard owns /.
|
| 6 |
+
#
|
| 7 |
+
# Why source build (not the prebuilt ghcr image): Next.js basePath is
|
| 8 |
+
# build-time. The official image bakes basePath="/" into the static bundle,
|
| 9 |
+
# so we'd be unable to relocate the UI to /app without rebuilding.
|
| 10 |
+
#
|
| 11 |
+
# Container layout:
|
| 12 |
+
# - nginx (port 5000, internal) — Postiz frontend + backend + uploads
|
| 13 |
+
# - PM2 → 4 Postiz procs (backend/frontend/workers/cron)
|
| 14 |
+
# - postgres (port 5432, internal)
|
| 15 |
+
# - redis (port 6379, internal)
|
| 16 |
+
# - postiz-sync.py loop — backup DB + uploads to HF Dataset
|
| 17 |
+
# - health-server.js (port 7860, public) — dashboard + reverse proxy
|
| 18 |
+
# ============================================================================
|
| 19 |
+
|
| 20 |
+
# ── Stage 1: Build Postiz with /app basePath patch ───────────────────────────
|
| 21 |
+
FROM node:22.20-alpine AS postiz-builder
|
| 22 |
+
|
| 23 |
+
WORKDIR /build
|
| 24 |
+
|
| 25 |
+
ARG NEXT_PUBLIC_VERSION=v2.11.3
|
| 26 |
+
ENV NEXT_PUBLIC_VERSION=$NEXT_PUBLIC_VERSION
|
| 27 |
+
|
| 28 |
+
RUN apk add --no-cache \
|
| 29 |
+
git \
|
| 30 |
+
g++ \
|
| 31 |
+
make \
|
| 32 |
+
py3-pip \
|
| 33 |
+
bash
|
| 34 |
+
|
| 35 |
+
RUN npm install -g pnpm@10.6.1
|
| 36 |
+
|
| 37 |
+
# Pinned to v2.11.3 — last release before Temporal became a hard requirement.
|
| 38 |
+
RUN git clone --depth=1 --branch v2.11.3 https://github.com/gitroomhq/postiz-app.git .
|
| 39 |
+
|
| 40 |
+
# Patch Next.js config to mount the frontend at /app.
|
| 41 |
+
# We inject basePath + assetPrefix immediately after `const nextConfig = {`.
|
| 42 |
+
# This makes Next.js generate links/asset URLs prefixed with /app, so the
|
| 43 |
+
# browser will hit /app/_next/* etc., which our health-server then strips
|
| 44 |
+
# back to /_next/* before passing to nginx.
|
| 45 |
+
RUN sed -i "s|const nextConfig = {|const nextConfig = {\n basePath: '/app',\n assetPrefix: '/app',|" apps/frontend/next.config.js \
|
| 46 |
+
&& grep -q "basePath: '/app'" apps/frontend/next.config.js \
|
| 47 |
+
|| (echo "BASEPATH PATCH FAILED — next.config.js shape changed upstream"; exit 1)
|
| 48 |
+
|
| 49 |
+
# Install + build. 4 GB heap for the Next.js compile.
|
| 50 |
+
RUN pnpm install --frozen-lockfile=false
|
| 51 |
+
RUN NODE_OPTIONS="--max-old-space-size=4096" pnpm run build
|
| 52 |
+
|
| 53 |
+
# Drop dev junk to shrink the runtime image.
|
| 54 |
+
RUN find . -name ".git" -type d -prune -exec rm -rf {} + 2>/dev/null || true \
|
| 55 |
+
&& rm -rf .github reports Jenkins .devcontainer 2>/dev/null || true
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
# ── Stage 2: Runtime ─────────────────────────────────────────────────────────
|
| 59 |
+
FROM node:22.20-alpine
|
| 60 |
+
|
| 61 |
+
WORKDIR /app
|
| 62 |
+
|
| 63 |
+
# System deps — same set as upstream's Dockerfile.dev (bash, nginx, py3-pip)
|
| 64 |
+
# plus postgres + redis + extras we need.
|
| 65 |
+
RUN apk add --no-cache \
|
| 66 |
+
bash \
|
| 67 |
+
curl \
|
| 68 |
+
ca-certificates \
|
| 69 |
+
openssl \
|
| 70 |
+
jq \
|
| 71 |
+
nginx \
|
| 72 |
+
postgresql16 \
|
| 73 |
+
postgresql16-contrib \
|
| 74 |
+
postgresql16-client \
|
| 75 |
+
redis \
|
| 76 |
+
py3-pip \
|
| 77 |
+
su-exec
|
| 78 |
+
|
| 79 |
+
# nginx user — upstream uses 'www'. Mirror that so its nginx.conf works.
|
| 80 |
+
RUN adduser -D -g 'www' www \
|
| 81 |
+
&& mkdir -p /var/lib/nginx /var/log/nginx \
|
| 82 |
+
&& chown -R www:www /var/lib/nginx
|
| 83 |
+
|
| 84 |
+
# pnpm + pm2 to run Postiz processes the same way upstream does
|
| 85 |
+
RUN npm install -g pnpm@10.6.1 pm2
|
| 86 |
+
|
| 87 |
+
# Python deps for HF Dataset sync
|
| 88 |
+
RUN pip install --no-cache-dir --break-system-packages \
|
| 89 |
+
huggingface_hub \
|
| 90 |
+
PyYAML
|
| 91 |
+
|
| 92 |
+
# Copy fully-built Postiz into /app
|
| 93 |
+
COPY --from=postiz-builder /build /app
|
| 94 |
+
|
| 95 |
+
# Use upstream's nginx.conf — defines the routing nginx :5000 → backend :3000
|
| 96 |
+
# (under /api), uploads alias, and frontend :4200 (under /). HuggingPost's
|
| 97 |
+
# health-server already strips /app before forwarding here, so nginx sees
|
| 98 |
+
# the same paths it expects in the upstream layout.
|
| 99 |
+
COPY --from=postiz-builder /build/var/docker/nginx.conf /etc/nginx/nginx.conf
|
| 100 |
+
|
| 101 |
+
# Health-server lives outside /app so its node_modules don't collide with
|
| 102 |
+
# Postiz's pnpm workspaces.
|
| 103 |
+
RUN mkdir -p /opt/healthsrv && cd /opt/healthsrv && \
|
| 104 |
+
npm init -y >/dev/null && \
|
| 105 |
+
npm install --no-save --no-audit --no-fund express@4 cors morgan
|
| 106 |
+
|
| 107 |
+
# Postgres/Redis/uploads dirs — all under /postiz so postiz-sync.py can
|
| 108 |
+
# include them in the backup tarball.
|
| 109 |
+
RUN mkdir -p /var/run/postgresql /postiz/pgdata /postiz/redis /postiz/uploads /postiz/.secrets \
|
| 110 |
+
&& chown -R postgres:postgres /var/run/postgresql /postiz/pgdata \
|
| 111 |
+
&& chmod 700 /postiz/pgdata
|
| 112 |
+
|
| 113 |
+
# Symlink /uploads → /postiz/uploads so nginx's `alias /uploads/` picks up
|
| 114 |
+
# media stored in the persisted tree.
|
| 115 |
+
RUN ln -sf /postiz/uploads /uploads
|
| 116 |
+
|
| 117 |
+
# Copy orchestration files
|
| 118 |
+
COPY start.sh /opt/start.sh
|
| 119 |
+
COPY health-server.js /opt/healthsrv/health-server.js
|
| 120 |
+
COPY postiz-sync.py /opt/postiz-sync.py
|
| 121 |
+
COPY cloudflare-proxy.js /opt/cloudflare-proxy.js
|
| 122 |
+
COPY cloudflare-proxy-setup.py /opt/cloudflare-proxy-setup.py
|
| 123 |
+
COPY cloudflare-worker.js /opt/cloudflare-worker.js
|
| 124 |
+
COPY setup-uptimerobot.sh /opt/setup-uptimerobot.sh
|
| 125 |
+
|
| 126 |
+
RUN chmod +x /opt/start.sh /opt/setup-uptimerobot.sh
|
| 127 |
+
|
| 128 |
+
EXPOSE 7860
|
| 129 |
+
|
| 130 |
+
HEALTHCHECK --interval=30s --timeout=10s --start-period=240s --retries=3 \
|
| 131 |
+
CMD curl -f http://localhost:7860/health || exit 1
|
| 132 |
+
|
| 133 |
+
CMD ["/opt/start.sh"]
|
LICENSE
ADDED
|
@@ -0,0 +1,34 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
MIT License
|
| 2 |
+
|
| 3 |
+
Copyright (c) 2026 HuggingPost Contributors
|
| 4 |
+
|
| 5 |
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
| 6 |
+
of this software and associated documentation files (the "Software"), to deal
|
| 7 |
+
in the Software without restriction, including without limitation the rights
|
| 8 |
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
| 9 |
+
copies of the Software, and to permit persons to whom the Software is
|
| 10 |
+
furnished to do so, subject to the following conditions:
|
| 11 |
+
|
| 12 |
+
The above copyright notice and this permission notice shall be included in all
|
| 13 |
+
copies or substantial portions of the Software.
|
| 14 |
+
|
| 15 |
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
| 16 |
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
| 17 |
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
| 18 |
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
| 19 |
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
| 20 |
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
| 21 |
+
SOFTWARE.
|
| 22 |
+
|
| 23 |
+
---
|
| 24 |
+
|
| 25 |
+
The wrapper / orchestration code in this repository is MIT-licensed.
|
| 26 |
+
|
| 27 |
+
Postiz (https://github.com/gitroomhq/postiz-app) — the application this
|
| 28 |
+
project deploys — is licensed AGPL-3.0. By running Postiz via this Space you
|
| 29 |
+
agree to the terms of the AGPL-3.0 license.
|
| 30 |
+
|
| 31 |
+
This project also builds upon orchestration patterns from:
|
| 32 |
+
- HuggingClip: https://huggingface.co/spaces/somratpro/HuggingClip
|
| 33 |
+
- HuggingClaw: https://github.com/democra-ai/HuggingClaw (Apache 2.0)
|
| 34 |
+
- Hugging8n: https://github.com/somratpro/Hugging8n (MIT)
|
README.md
ADDED
|
@@ -0,0 +1,259 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
---
|
| 2 |
+
title: HuggingPost
|
| 3 |
+
emoji: 📮
|
| 4 |
+
colorFrom: pink
|
| 5 |
+
colorTo: indigo
|
| 6 |
+
sdk: docker
|
| 7 |
+
app_port: 7860
|
| 8 |
+
pinned: true
|
| 9 |
+
license: agpl-3.0
|
| 10 |
+
secrets:
|
| 11 |
+
- name: HF_TOKEN
|
| 12 |
+
description: HF token with WRITE access — enables DB+uploads backup persistence to a private HF Dataset.
|
| 13 |
+
- name: JWT_SECRET
|
| 14 |
+
description: (Optional) Random 48-byte string. Auto-generated on first boot and persisted to backup.
|
| 15 |
+
- name: CLOUDFLARE_WORKERS_TOKEN
|
| 16 |
+
description: (Optional) Cloudflare API token (Workers Scripts → Edit) to auto-provision an outbound proxy.
|
| 17 |
+
- name: RESEND_API_KEY
|
| 18 |
+
description: (Optional) Resend key for sending email activation links. Without it, registration is auto-activated.
|
| 19 |
+
- name: STORAGE_PROVIDER
|
| 20 |
+
description: (Optional) "local" (default) or "cloudflare" to offload media to R2.
|
| 21 |
+
- name: CLOUDFLARE_ACCOUNT_ID
|
| 22 |
+
description: (Optional, if STORAGE_PROVIDER=cloudflare) R2 account ID.
|
| 23 |
+
- name: CLOUDFLARE_ACCESS_KEY
|
| 24 |
+
description: (Optional, if STORAGE_PROVIDER=cloudflare) R2 access key ID.
|
| 25 |
+
- name: CLOUDFLARE_SECRET_ACCESS_KEY
|
| 26 |
+
description: (Optional, if STORAGE_PROVIDER=cloudflare) R2 secret access key.
|
| 27 |
+
- name: CLOUDFLARE_BUCKETNAME
|
| 28 |
+
description: (Optional, if STORAGE_PROVIDER=cloudflare) R2 bucket name.
|
| 29 |
+
- name: CLOUDFLARE_BUCKET_URL
|
| 30 |
+
description: (Optional, if STORAGE_PROVIDER=cloudflare) R2 public bucket URL.
|
| 31 |
+
---
|
| 32 |
+
|
| 33 |
+
[](https://github.com/somratpro/huggingpost)
|
| 34 |
+
[](https://opensource.org/licenses/MIT)
|
| 35 |
+
[](https://huggingface.co/spaces/somratpro/HuggingPost)
|
| 36 |
+
[](https://github.com/gitroomhq/postiz-app)
|
| 37 |
+
|
| 38 |
+
**Self-host [Postiz](https://postiz.com) (open-source social-media scheduler — X, LinkedIn, Facebook, Threads, TikTok, YouTube, Pinterest, Reddit, Mastodon, Discord, Slack, and more) on the free Hugging Face Spaces tier.** Persistent across restarts via private HF Dataset backup. No external database, no paid storage required.
|
| 39 |
+
|
| 40 |
+
## Table of Contents
|
| 41 |
+
|
| 42 |
+
- [✨ Features](#-features)
|
| 43 |
+
- [🚀 Quick Start](#-quick-start)
|
| 44 |
+
- [🔑 Configuration](#-configuration)
|
| 45 |
+
- [💾 Backup & Persistence](#-backup--persistence)
|
| 46 |
+
- [💓 Keep It Awake](#-keep-it-awake)
|
| 47 |
+
- [🌐 Cloudflare Proxy *(Optional)*](#-cloudflare-proxy-optional)
|
| 48 |
+
- [🔌 Connecting Social Accounts](#-connecting-social-accounts)
|
| 49 |
+
- [🏗️ Architecture](#️-architecture)
|
| 50 |
+
- [🐛 Troubleshooting](#-troubleshooting)
|
| 51 |
+
- [📚 Links](#-links)
|
| 52 |
+
|
| 53 |
+
## ✨ Features
|
| 54 |
+
|
| 55 |
+
- 📅 **30+ Social Platforms** — schedule posts to X, LinkedIn, Facebook, Threads, TikTok, YouTube, Reddit, Mastodon, Discord, Slack, Pinterest, etc.
|
| 56 |
+
- ⚡ **One-click deploy** — duplicate the Space, add `HF_TOKEN`, you're done.
|
| 57 |
+
- 💾 **Persistent across restarts** — PostgreSQL + uploaded media auto-backed up to a private HF Dataset every 5 min and restored on boot.
|
| 58 |
+
- 💓 **Keep-Alive** — built-in dashboard helper to set up an UptimeRobot monitor so scheduled posts actually fire.
|
| 59 |
+
- 🌐 **Outbound firewall workaround** — optional Cloudflare Worker proxy auto-provisioned for blocked platform APIs.
|
| 60 |
+
- 🔒 **Secrets generated** — `JWT_SECRET` auto-generated on first boot and persisted, no manual setup.
|
| 61 |
+
- 🏠 **100% HF-Native** — no external Postgres/Redis/storage accounts needed for the default path.
|
| 62 |
+
- 📌 **Pinned to v2.11.3** — last release before Postiz mandated Temporal (which doesn't fit in a single HF container).
|
| 63 |
+
|
| 64 |
+
## 🚀 Quick Start
|
| 65 |
+
|
| 66 |
+
### Step 1: Duplicate this Space
|
| 67 |
+
|
| 68 |
+
[](https://huggingface.co/spaces/somratpro/HuggingPost?duplicate=true)
|
| 69 |
+
|
| 70 |
+
### Step 2: Add `HF_TOKEN`
|
| 71 |
+
|
| 72 |
+
In your new Space's **Settings → Variables and secrets → New secret**:
|
| 73 |
+
|
| 74 |
+
| Secret | How to get it |
|
| 75 |
+
| :--- | :--- |
|
| 76 |
+
| `HF_TOKEN` | [huggingface.co/settings/tokens](https://huggingface.co/settings/tokens) → New token → **Write** access |
|
| 77 |
+
|
| 78 |
+
> [!WARNING]
|
| 79 |
+
> Without `HF_TOKEN`, your data (accounts, scheduled posts, uploaded media) is **lost on every Space restart**. Set this up first.
|
| 80 |
+
|
| 81 |
+
### Step 3: Wait for the build (~5–8 min first time)
|
| 82 |
+
|
| 83 |
+
Watch progress in the **Logs** tab. The Postiz build is heavy because it compiles a Next.js frontend + NestJS backend.
|
| 84 |
+
|
| 85 |
+
### Step 4: Open the Space
|
| 86 |
+
|
| 87 |
+
Land on the HuggingPost dashboard. Click **Open Postiz →** to reach the login page. **Sign up** to create the first admin account — registration is auto-activated unless you set `RESEND_API_KEY`.
|
| 88 |
+
|
| 89 |
+
### Step 5: Set Up Keep-Alive (1 min)
|
| 90 |
+
|
| 91 |
+
On the dashboard, paste your UptimeRobot **Main API key** to create an external monitor that pings `/health` every 5 min. Without this, the Space will sleep and scheduled posts won't fire.
|
| 92 |
+
|
| 93 |
+
## 🔑 Configuration
|
| 94 |
+
|
| 95 |
+
### Required
|
| 96 |
+
|
| 97 |
+
| Variable | Purpose |
|
| 98 |
+
| :--- | :--- |
|
| 99 |
+
| `HF_TOKEN` | Write-access HF token — enables backup persistence |
|
| 100 |
+
|
| 101 |
+
### Recommended
|
| 102 |
+
|
| 103 |
+
| Variable | Default | Purpose |
|
| 104 |
+
| :--- | :--- | :--- |
|
| 105 |
+
| `SYNC_INTERVAL` | `300` | Backup interval in seconds (5 min) |
|
| 106 |
+
| `BACKUP_DATASET_NAME` | `huggingpost-backup` | Private dataset name (`<user>/<name>`) |
|
| 107 |
+
| `RESEND_API_KEY` | — | Required only if you want signup activation emails |
|
| 108 |
+
|
| 109 |
+
### Storage (Optional — for media offload)
|
| 110 |
+
|
| 111 |
+
By default, uploaded media (post images/videos) is stored in `/postiz/uploads` inside the container and included in the HF Dataset backup. If your media exceeds ~80 MB total, switch to Cloudflare R2:
|
| 112 |
+
|
| 113 |
+
| Variable | Purpose |
|
| 114 |
+
| :--- | :--- |
|
| 115 |
+
| `STORAGE_PROVIDER` | Set to `cloudflare` |
|
| 116 |
+
| `CLOUDFLARE_ACCOUNT_ID` | R2 account ID |
|
| 117 |
+
| `CLOUDFLARE_ACCESS_KEY` | R2 access key |
|
| 118 |
+
| `CLOUDFLARE_SECRET_ACCESS_KEY` | R2 secret |
|
| 119 |
+
| `CLOUDFLARE_BUCKETNAME` | R2 bucket name |
|
| 120 |
+
| `CLOUDFLARE_BUCKET_URL` | Public R2 URL prefix |
|
| 121 |
+
|
| 122 |
+
R2 free tier is 10 GB storage + 1M reads/month — plenty for typical use.
|
| 123 |
+
|
| 124 |
+
### Advanced
|
| 125 |
+
|
| 126 |
+
| Variable | Default | Purpose |
|
| 127 |
+
| :--- | :--- | :--- |
|
| 128 |
+
| `JWT_SECRET` | auto-generated | If unset, generated and persisted on first boot |
|
| 129 |
+
| `SYNC_MAX_FILE_BYTES` | `104857600` (100 MB) | Skip backup if tarball exceeds this size |
|
| 130 |
+
| `DISABLE_REGISTRATION` | `false` | Set to `true` after creating your admin account |
|
| 131 |
+
| `API_LIMIT` | `30` | Public API hourly rate limit |
|
| 132 |
+
|
| 133 |
+
## 💾 Backup & Persistence
|
| 134 |
+
|
| 135 |
+
Every `SYNC_INTERVAL` seconds (default 5 min), HuggingPost:
|
| 136 |
+
|
| 137 |
+
1. Runs `pg_dump` on the Postiz database.
|
| 138 |
+
2. Tars the dump + `/postiz/uploads` + `/postiz/.secrets`.
|
| 139 |
+
3. Uploads `snapshots/latest.tar.gz` to your private dataset `<your-username>/huggingpost-backup`.
|
| 140 |
+
|
| 141 |
+
On boot, the reverse happens — secrets restored first, then DB drop+recreate+replay, then uploads copied back. Your scheduled posts, accounts, and media survive restarts.
|
| 142 |
+
|
| 143 |
+
**To inspect or download your backup:**
|
| 144 |
+
|
| 145 |
+
```bash
|
| 146 |
+
huggingface-cli download --repo-type dataset <your-username>/huggingpost-backup
|
| 147 |
+
```
|
| 148 |
+
|
| 149 |
+
> [!NOTE]
|
| 150 |
+
> The dataset is **private** by default. Don't share its URL publicly — the SQL dump contains your full Postiz state, including encrypted social-media tokens.
|
| 151 |
+
|
| 152 |
+
## 💓 Keep It Awake
|
| 153 |
+
|
| 154 |
+
Free HF Spaces sleep after ~48h of no traffic. A sleeping Space cannot fire scheduled posts. The dashboard has a one-time setup form for [UptimeRobot](https://uptimerobot.com) — create a free account, copy your **Main API key** (NOT a Read-only or Monitor-specific key), paste it in the dashboard.
|
| 155 |
+
|
| 156 |
+
This works for **public** Spaces only. Private Spaces cannot be reached by external monitors.
|
| 157 |
+
|
| 158 |
+
## 🌐 Cloudflare Proxy *(Optional)*
|
| 159 |
+
|
| 160 |
+
Hugging Face Spaces sometimes block outbound HTTP to specific social-platform APIs. HuggingPost ships the same transparent Cloudflare Worker proxy used in HuggingClip / HuggingClaw / Hugging8n.
|
| 161 |
+
|
| 162 |
+
**Auto-setup:**
|
| 163 |
+
|
| 164 |
+
1. Create a Cloudflare API Token with `Workers Scripts: Edit` permission.
|
| 165 |
+
2. Add `CLOUDFLARE_WORKERS_TOKEN` as a Space secret.
|
| 166 |
+
3. Restart the Space.
|
| 167 |
+
|
| 168 |
+
HuggingPost will create or update a Worker named `<your-space-host>-proxy` and route blocked outbound traffic through it transparently. You can scope it with `CLOUDFLARE_PROXY_DOMAINS` (default `*` = all external).
|
| 169 |
+
|
| 170 |
+
## 🔌 Connecting Social Accounts
|
| 171 |
+
|
| 172 |
+
Each social platform requires you to register your Postiz instance as an OAuth app. The callback URL pattern is:
|
| 173 |
+
|
| 174 |
+
```
|
| 175 |
+
https://<your-space-host>/api/integrations/social/<platform>/callback
|
| 176 |
+
```
|
| 177 |
+
|
| 178 |
+
For each platform you want (X, LinkedIn, Facebook, etc.), follow the [Postiz provider docs](https://docs.postiz.com/providers) to obtain client ID + secret, then enter them inside Postiz **Settings → Channels** (NOT as Space secrets — Postiz stores them encrypted in its DB).
|
| 179 |
+
|
| 180 |
+
> [!TIP]
|
| 181 |
+
> Some platforms (like X) require a publicly verifiable domain. The HF Space subdomain (`*.hf.space`) works for most but not all platforms. Check each platform's app-creation requirements.
|
| 182 |
+
|
| 183 |
+
## 🏗️ Architecture
|
| 184 |
+
|
| 185 |
+
```
|
| 186 |
+
HuggingPost/
|
| 187 |
+
├── Dockerfile # Two-stage: build Postiz v2.11.3 → runtime
|
| 188 |
+
├── start.sh # Orchestrator (Postgres → Redis → restore → procs)
|
| 189 |
+
├── health-server.js # Port 7860: dashboard + reverse proxy split
|
| 190 |
+
├── postiz-sync.py # Backup/restore DB + uploads to HF Dataset
|
| 191 |
+
├── cloudflare-proxy.js # Transparent outbound proxy injected via NODE_OPTIONS
|
| 192 |
+
├── cloudflare-proxy-setup.py
|
| 193 |
+
├── cloudflare-worker.js
|
| 194 |
+
├── setup-uptimerobot.sh
|
| 195 |
+
├── docker-compose.yml # Local dev convenience
|
| 196 |
+
├── .env.example # Configuration reference
|
| 197 |
+
└── README.md
|
| 198 |
+
```
|
| 199 |
+
|
| 200 |
+
**Single-port routing** (port 7860, the only port HF Spaces exposes):
|
| 201 |
+
|
| 202 |
+
| Path | Target | Notes |
|
| 203 |
+
| :--- | :--- | :--- |
|
| 204 |
+
| `/` | dashboard (local) | HTML status page |
|
| 205 |
+
| `/health`, `/status`, `/uptimerobot/setup` | local | JSON / handlers |
|
| 206 |
+
| `/api/*` | backend `:3000` | `/api` prefix stripped |
|
| 207 |
+
| `/uploads/*` | backend `:3000` | media files |
|
| 208 |
+
| `/*` | frontend `:4200` | Next.js pages, `/_next/*`, etc. |
|
| 209 |
+
|
| 210 |
+
**Internal processes:**
|
| 211 |
+
|
| 212 |
+
| Process | Port | Memory cap |
|
| 213 |
+
| :--- | :--- | :--- |
|
| 214 |
+
| `health-server.js` | 7860 (public) | — |
|
| 215 |
+
| Postiz frontend (Next.js) | 4200 | 2 GB |
|
| 216 |
+
| Postiz backend (NestJS) | 3000 | 2 GB |
|
| 217 |
+
| Postiz workers | — | 1 GB |
|
| 218 |
+
| Postiz cron | — | 512 MB |
|
| 219 |
+
| `postgres` | 5432 | — |
|
| 220 |
+
| `redis-server` | 6379 | — |
|
| 221 |
+
| `postiz-sync.py` (loop) | — | — |
|
| 222 |
+
|
| 223 |
+
Total resident set ~3–6 GB under typical load — well within HF free tier's 16 GB.
|
| 224 |
+
|
| 225 |
+
## 🐛 Troubleshooting
|
| 226 |
+
|
| 227 |
+
**"Postiz backend unavailable" on first load**
|
| 228 |
+
First boot takes 30–90s after the build finishes. Wait for the dashboard to show green badges for both backend and frontend.
|
| 229 |
+
|
| 230 |
+
**Data lost after restart**
|
| 231 |
+
`HF_TOKEN` is not set, or it doesn't have write access. Add it and the next restart will restore from backup. The backup must have run at least once before the restart.
|
| 232 |
+
|
| 233 |
+
**Backup too large (>100 MB)**
|
| 234 |
+
Either move media to Cloudflare R2 (`STORAGE_PROVIDER=cloudflare`) or raise `SYNC_MAX_FILE_BYTES`. The HF Dataset itself supports much larger files, but huge backups slow restart.
|
| 235 |
+
|
| 236 |
+
**Scheduled posts didn't fire while I was away**
|
| 237 |
+
The Space slept. Set up UptimeRobot from the dashboard.
|
| 238 |
+
|
| 239 |
+
**OAuth callback fails for X/Facebook/LinkedIn**
|
| 240 |
+
Some platforms reject `*.hf.space` subdomains as redirect URIs. You may need to put a custom domain in front (Cloudflare → HF Space CNAME).
|
| 241 |
+
|
| 242 |
+
**Out of memory during build**
|
| 243 |
+
The Next.js build needs `--max-old-space-size=4096`. If you forked and changed the Dockerfile, make sure the `NODE_OPTIONS` flag is still on the `pnpm run build` line.
|
| 244 |
+
|
| 245 |
+
**`prisma-db-push` fails on first boot**
|
| 246 |
+
Usually means Postgres didn't finish starting. Container will exit and HF will auto-restart — second boot usually succeeds. If it persists, check Logs for the actual Prisma error.
|
| 247 |
+
|
| 248 |
+
## 📚 Links
|
| 249 |
+
|
| 250 |
+
- [Postiz on GitHub](https://github.com/gitroomhq/postiz-app)
|
| 251 |
+
- [Postiz docs](https://docs.postiz.com)
|
| 252 |
+
- [HuggingFace Spaces docs](https://huggingface.co/docs/hub/spaces)
|
| 253 |
+
- Sister projects: [HuggingClip](https://huggingface.co/spaces/somratpro/HuggingClip) (Paperclip), [Hugging8n](https://huggingface.co/spaces/somratpro/Hugging8n) (n8n)
|
| 254 |
+
|
| 255 |
+
## 📄 License
|
| 256 |
+
|
| 257 |
+
Wrapper code: MIT. Postiz itself: AGPL-3.0 — see [github.com/gitroomhq/postiz-app](https://github.com/gitroomhq/postiz-app/blob/main/LICENSE) for terms.
|
| 258 |
+
|
| 259 |
+
*Made with ❤️ by [@somratpro](https://github.com/somratpro)*
|
cloudflare-proxy-setup.py
ADDED
|
@@ -0,0 +1,253 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
|
| 3 |
+
import json
|
| 4 |
+
import os
|
| 5 |
+
import re
|
| 6 |
+
import secrets
|
| 7 |
+
import sys
|
| 8 |
+
import urllib.error
|
| 9 |
+
import urllib.request
|
| 10 |
+
from pathlib import Path
|
| 11 |
+
|
| 12 |
+
API_BASE = "https://api.cloudflare.com/client/v4"
|
| 13 |
+
ENV_FILE = Path("/tmp/huggingpost-cloudflare-proxy.env")
|
| 14 |
+
DEFAULT_ALLOWED = [
|
| 15 |
+
"api.telegram.org",
|
| 16 |
+
"discord.com",
|
| 17 |
+
"discordapp.com",
|
| 18 |
+
"gateway.discord.gg",
|
| 19 |
+
"status.discord.com",
|
| 20 |
+
"web.whatsapp.com",
|
| 21 |
+
"graph.facebook.com",
|
| 22 |
+
"googleapis.com",
|
| 23 |
+
"google.com",
|
| 24 |
+
"googleusercontent.com",
|
| 25 |
+
"gstatic.com",
|
| 26 |
+
]
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
def cf_request(method: str, path: str, token: str, body: bytes | None = None, content_type: str = "application/json"):
|
| 30 |
+
req = urllib.request.Request(
|
| 31 |
+
f"{API_BASE}{path}",
|
| 32 |
+
data=body,
|
| 33 |
+
method=method,
|
| 34 |
+
headers={
|
| 35 |
+
"Authorization": f"Bearer {token}",
|
| 36 |
+
"Content-Type": content_type,
|
| 37 |
+
},
|
| 38 |
+
)
|
| 39 |
+
with urllib.request.urlopen(req, timeout=30) as response:
|
| 40 |
+
payload = json.loads(response.read().decode("utf-8"))
|
| 41 |
+
if not payload.get("success"):
|
| 42 |
+
errors = payload.get("errors") or [{"message": "Unknown Cloudflare API error"}]
|
| 43 |
+
raise RuntimeError(errors[0].get("message", "Unknown Cloudflare API error"))
|
| 44 |
+
return payload["result"]
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
def slugify(value: str) -> str:
|
| 48 |
+
cleaned = re.sub(r"[^a-z0-9-]+", "-", value.lower()).strip("-")
|
| 49 |
+
cleaned = re.sub(r"-{2,}", "-", cleaned)
|
| 50 |
+
if not cleaned:
|
| 51 |
+
cleaned = "huggingpost-proxy"
|
| 52 |
+
return cleaned[:63].rstrip("-")
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
def derive_worker_name() -> str:
|
| 56 |
+
explicit = os.environ.get("CLOUDFLARE_WORKER_NAME", "").strip()
|
| 57 |
+
if explicit:
|
| 58 |
+
return slugify(explicit)
|
| 59 |
+
space_host = os.environ.get("SPACE_HOST", "").strip()
|
| 60 |
+
if space_host:
|
| 61 |
+
base = space_host.replace(".hf.space", "")
|
| 62 |
+
return slugify(f"{base}-proxy")
|
| 63 |
+
return "huggingpost-proxy"
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
def render_worker(secret_value: str, allowed_targets: list[str], allow_proxy_all: bool) -> str:
|
| 67 |
+
allowed_json = json.dumps(allowed_targets)
|
| 68 |
+
allow_all_js = "true" if allow_proxy_all else "false"
|
| 69 |
+
secret_json = json.dumps(secret_value)
|
| 70 |
+
return f"""addEventListener("fetch", (event) => {{
|
| 71 |
+
event.respondWith(handleRequest(event.request));
|
| 72 |
+
}});
|
| 73 |
+
|
| 74 |
+
const PROXY_SHARED_SECRET = {secret_json};
|
| 75 |
+
const ALLOW_PROXY_ALL = {allow_all_js};
|
| 76 |
+
const ALLOWED_TARGETS = {allowed_json};
|
| 77 |
+
|
| 78 |
+
function isAllowedHost(hostname) {{
|
| 79 |
+
const normalized = String(hostname || "").trim().toLowerCase();
|
| 80 |
+
if (!normalized) return false;
|
| 81 |
+
if (ALLOW_PROXY_ALL) return true;
|
| 82 |
+
return ALLOWED_TARGETS.some(
|
| 83 |
+
(domain) => normalized === domain || normalized.endsWith(`.${{domain}}`),
|
| 84 |
+
);
|
| 85 |
+
}}
|
| 86 |
+
|
| 87 |
+
async function handleRequest(request) {{
|
| 88 |
+
const url = new URL(request.url);
|
| 89 |
+
const queryTarget = url.searchParams.get("proxy_target");
|
| 90 |
+
const targetHost = request.headers.get("x-target-host") || queryTarget;
|
| 91 |
+
|
| 92 |
+
if (PROXY_SHARED_SECRET) {{
|
| 93 |
+
const providedSecret = request.headers.get("x-proxy-key") || url.searchParams.get("proxy_key") || "";
|
| 94 |
+
if (providedSecret !== PROXY_SHARED_SECRET) {{
|
| 95 |
+
if (url.pathname.startsWith("/bot") && !targetHost) {{
|
| 96 |
+
// Allowed fallback
|
| 97 |
+
}} else {{
|
| 98 |
+
return new Response("Unauthorized: Invalid proxy key", {{ status: 401 }});
|
| 99 |
+
}}
|
| 100 |
+
}}
|
| 101 |
+
}}
|
| 102 |
+
|
| 103 |
+
let targetBase = "";
|
| 104 |
+
if (targetHost) {{
|
| 105 |
+
if (!isAllowedHost(targetHost)) {{
|
| 106 |
+
return new Response(`Forbidden: Host ${{targetHost}} is not allowed.`, {{ status: 403 }});
|
| 107 |
+
}}
|
| 108 |
+
targetBase = `https://${{targetHost}}`;
|
| 109 |
+
}} else if (url.pathname.startsWith("/bot")) {{
|
| 110 |
+
targetBase = "https://api.telegram.org";
|
| 111 |
+
}} else {{
|
| 112 |
+
return new Response("Invalid request: No target host provided.", {{ status: 400 }});
|
| 113 |
+
}}
|
| 114 |
+
|
| 115 |
+
const cleanSearch = new URLSearchParams(url.search);
|
| 116 |
+
cleanSearch.delete("proxy_target");
|
| 117 |
+
cleanSearch.delete("proxy_key");
|
| 118 |
+
const searchStr = cleanSearch.toString();
|
| 119 |
+
const targetUrl = targetBase + url.pathname + (searchStr ? `?${{searchStr}}` : "");
|
| 120 |
+
|
| 121 |
+
const headers = new Headers(request.headers);
|
| 122 |
+
headers.delete("cf-connecting-ip");
|
| 123 |
+
headers.delete("cf-ray");
|
| 124 |
+
headers.delete("cf-visitor");
|
| 125 |
+
headers.delete("host");
|
| 126 |
+
headers.delete("x-real-ip");
|
| 127 |
+
headers.delete("x-target-host");
|
| 128 |
+
headers.delete("x-proxy-key");
|
| 129 |
+
|
| 130 |
+
const proxiedRequest = new Request(targetUrl, {{
|
| 131 |
+
method: request.method,
|
| 132 |
+
headers,
|
| 133 |
+
body: request.body,
|
| 134 |
+
redirect: "follow",
|
| 135 |
+
}});
|
| 136 |
+
|
| 137 |
+
try {{
|
| 138 |
+
return await fetch(proxiedRequest);
|
| 139 |
+
}} catch (error) {{
|
| 140 |
+
return new Response(`Proxy Error: ${{error.message}}`, {{ status: 502 }});
|
| 141 |
+
}}
|
| 142 |
+
}}
|
| 143 |
+
"""
|
| 144 |
+
|
| 145 |
+
|
| 146 |
+
def write_env(proxy_url: str, proxy_secret: str) -> None:
|
| 147 |
+
ENV_FILE.write_text(
|
| 148 |
+
"\n".join(
|
| 149 |
+
[
|
| 150 |
+
f'export CLOUDFLARE_PROXY_URL="{proxy_url}"',
|
| 151 |
+
f'export CLOUDFLARE_PROXY_SECRET="{proxy_secret}"',
|
| 152 |
+
]
|
| 153 |
+
)
|
| 154 |
+
+ "\n",
|
| 155 |
+
encoding="utf-8",
|
| 156 |
+
)
|
| 157 |
+
# Belt-and-suspenders: even with umask 0077 on the parent shell, force
|
| 158 |
+
# 0600 since the file holds the worker shared secret.
|
| 159 |
+
try:
|
| 160 |
+
ENV_FILE.chmod(0o600)
|
| 161 |
+
except OSError:
|
| 162 |
+
pass
|
| 163 |
+
|
| 164 |
+
|
| 165 |
+
def main() -> int:
|
| 166 |
+
existing_url = os.environ.get("CLOUDFLARE_PROXY_URL", "").strip()
|
| 167 |
+
existing_secret = os.environ.get("CLOUDFLARE_PROXY_SECRET", "").strip()
|
| 168 |
+
api_token = os.environ.get("CLOUDFLARE_WORKERS_TOKEN", "").strip()
|
| 169 |
+
|
| 170 |
+
if existing_url:
|
| 171 |
+
# Always write the env file so downstream `. $CF_PROXY_ENV_FILE` in
|
| 172 |
+
# start.sh has CLOUDFLARE_PROXY_URL set even when no secret was
|
| 173 |
+
# supplied. Empty secret means we send no x-proxy-key header — that
|
| 174 |
+
# only works if the deployed worker also has no secret baked in.
|
| 175 |
+
write_env(existing_url, existing_secret)
|
| 176 |
+
if not existing_secret:
|
| 177 |
+
print(
|
| 178 |
+
"Warning: CLOUDFLARE_PROXY_URL is set but CLOUDFLARE_PROXY_SECRET "
|
| 179 |
+
"is empty. Requests will succeed only if the deployed worker "
|
| 180 |
+
"was built without PROXY_SHARED_SECRET; otherwise you'll see "
|
| 181 |
+
"401 Unauthorized.",
|
| 182 |
+
file=sys.stderr,
|
| 183 |
+
)
|
| 184 |
+
return 0
|
| 185 |
+
|
| 186 |
+
if not api_token:
|
| 187 |
+
return 0
|
| 188 |
+
|
| 189 |
+
account_id = os.environ.get("CLOUDFLARE_ACCOUNT_ID", "").strip()
|
| 190 |
+
try:
|
| 191 |
+
if not account_id:
|
| 192 |
+
accounts = cf_request("GET", "/accounts", api_token)
|
| 193 |
+
if not accounts:
|
| 194 |
+
raise RuntimeError("No Cloudflare account available for this token.")
|
| 195 |
+
account_id = accounts[0]["id"]
|
| 196 |
+
|
| 197 |
+
subdomain_info = cf_request(
|
| 198 |
+
"GET",
|
| 199 |
+
f"/accounts/{account_id}/workers/subdomain",
|
| 200 |
+
api_token,
|
| 201 |
+
)
|
| 202 |
+
subdomain = (subdomain_info or {}).get("subdomain", "").strip()
|
| 203 |
+
if not subdomain:
|
| 204 |
+
raise RuntimeError(
|
| 205 |
+
"Cloudflare Workers subdomain is not configured. Enable workers.dev in your Cloudflare account first."
|
| 206 |
+
)
|
| 207 |
+
|
| 208 |
+
worker_name = derive_worker_name()
|
| 209 |
+
allowed_raw = os.environ.get("CLOUDFLARE_PROXY_DOMAINS", "").strip()
|
| 210 |
+
allow_proxy_all = not allowed_raw or allowed_raw == "*"
|
| 211 |
+
allowed_targets = DEFAULT_ALLOWED if not allowed_raw or allow_proxy_all else [
|
| 212 |
+
value.strip() for value in allowed_raw.split(",") if value.strip()
|
| 213 |
+
]
|
| 214 |
+
proxy_secret = existing_secret or secrets.token_urlsafe(24)
|
| 215 |
+
worker_source = render_worker(proxy_secret, allowed_targets, allow_proxy_all)
|
| 216 |
+
|
| 217 |
+
cf_request(
|
| 218 |
+
"PUT",
|
| 219 |
+
f"/accounts/{account_id}/workers/scripts/{worker_name}",
|
| 220 |
+
api_token,
|
| 221 |
+
body=worker_source.encode("utf-8"),
|
| 222 |
+
content_type="application/javascript",
|
| 223 |
+
)
|
| 224 |
+
cf_request(
|
| 225 |
+
"POST",
|
| 226 |
+
f"/accounts/{account_id}/workers/scripts/{worker_name}/subdomain",
|
| 227 |
+
api_token,
|
| 228 |
+
body=json.dumps({"enabled": True, "previews_enabled": True}).encode("utf-8"),
|
| 229 |
+
)
|
| 230 |
+
|
| 231 |
+
proxy_url = f"https://{worker_name}.{subdomain}.workers.dev"
|
| 232 |
+
write_env(proxy_url, proxy_secret)
|
| 233 |
+
return 0
|
| 234 |
+
except urllib.error.HTTPError as error:
|
| 235 |
+
detail = error.read().decode("utf-8", errors="replace")
|
| 236 |
+
if error.code == 403 and '"code":9109' in detail:
|
| 237 |
+
print(
|
| 238 |
+
"Cloudflare proxy setup failed: invalid Workers token. "
|
| 239 |
+
"Use a Cloudflare API Token in CLOUDFLARE_WORKERS_TOKEN "
|
| 240 |
+
"(not a Global API Key, tunnel token, or worker secret). "
|
| 241 |
+
"For auto-setup, it should have account-level 'Workers Scripts: Edit'. "
|
| 242 |
+
"The setup can auto-discover your account; CLOUDFLARE_ACCOUNT_ID is not required.",
|
| 243 |
+
file=sys.stderr,
|
| 244 |
+
)
|
| 245 |
+
print(f"Cloudflare proxy setup failed: HTTP {error.code} {detail}", file=sys.stderr)
|
| 246 |
+
return 1
|
| 247 |
+
except Exception as error:
|
| 248 |
+
print(f"Cloudflare proxy setup failed: {error}", file=sys.stderr)
|
| 249 |
+
return 1
|
| 250 |
+
|
| 251 |
+
|
| 252 |
+
if __name__ == "__main__":
|
| 253 |
+
raise SystemExit(main())
|
cloudflare-proxy.js
ADDED
|
@@ -0,0 +1,375 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/**
|
| 2 |
+
* Cloudflare Proxy: Transparent Fix for Blocked Domains
|
| 3 |
+
*
|
| 4 |
+
* Patches https.request/http.request/fetch and undici to redirect traffic
|
| 5 |
+
* for blocked hosts through a Cloudflare Worker proxy.
|
| 6 |
+
*/
|
| 7 |
+
"use strict";
|
| 8 |
+
|
| 9 |
+
const https = require("https");
|
| 10 |
+
const http = require("http");
|
| 11 |
+
|
| 12 |
+
// Use stderr for logs to avoid breaking child processes that communicate via stdout JSON
|
| 13 |
+
const log = (...args) => console.error(...args);
|
| 14 |
+
|
| 15 |
+
let PROXY_URL = process.env.CLOUDFLARE_PROXY_URL;
|
| 16 |
+
if (
|
| 17 |
+
PROXY_URL &&
|
| 18 |
+
!PROXY_URL.startsWith("http://") &&
|
| 19 |
+
!PROXY_URL.startsWith("https://")
|
| 20 |
+
) {
|
| 21 |
+
PROXY_URL = `https://${PROXY_URL}`;
|
| 22 |
+
}
|
| 23 |
+
|
| 24 |
+
const DEBUG = process.env.CLOUDFLARE_PROXY_DEBUG === "true";
|
| 25 |
+
const PROXY_SHARED_SECRET = (process.env.CLOUDFLARE_PROXY_SECRET || "").trim();
|
| 26 |
+
const PROXY_DOMAINS = process.env.CLOUDFLARE_PROXY_DOMAINS || "*";
|
| 27 |
+
const BLOCKED_DOMAINS = PROXY_DOMAINS.split(",")
|
| 28 |
+
.map((domain) => domain.trim())
|
| 29 |
+
.filter(Boolean);
|
| 30 |
+
const PROXY_ALL = PROXY_DOMAINS === "*";
|
| 31 |
+
|
| 32 |
+
if (PROXY_URL) {
|
| 33 |
+
try {
|
| 34 |
+
const proxy = new URL(PROXY_URL);
|
| 35 |
+
const originalHttpsRequest = https.request;
|
| 36 |
+
const originalHttpRequest = http.request;
|
| 37 |
+
const originalFetch =
|
| 38 |
+
typeof globalThis.fetch === "function" ? globalThis.fetch.bind(globalThis) : null;
|
| 39 |
+
|
| 40 |
+
const shouldProxyHost = (hostname) => {
|
| 41 |
+
const normalized = String(hostname || "").trim().toLowerCase();
|
| 42 |
+
if (!normalized) return false;
|
| 43 |
+
|
| 44 |
+
const isInternal =
|
| 45 |
+
normalized === "localhost" ||
|
| 46 |
+
normalized === "127.0.0.1" ||
|
| 47 |
+
normalized === "::1" ||
|
| 48 |
+
normalized === "0.0.0.0" ||
|
| 49 |
+
normalized === proxy.hostname ||
|
| 50 |
+
normalized.endsWith(".hf.space") ||
|
| 51 |
+
normalized.endsWith(".huggingface.co") ||
|
| 52 |
+
normalized === "huggingface.co";
|
| 53 |
+
|
| 54 |
+
const should = PROXY_ALL ? !isInternal : BLOCKED_DOMAINS.some(
|
| 55 |
+
(domain) =>
|
| 56 |
+
normalized === domain || normalized.endsWith(`.${domain}`),
|
| 57 |
+
);
|
| 58 |
+
|
| 59 |
+
return should;
|
| 60 |
+
};
|
| 61 |
+
|
| 62 |
+
const patch = (original, originalModuleName) => {
|
| 63 |
+
return function patchedRequest(arg1, arg2, arg3) {
|
| 64 |
+
let options = {};
|
| 65 |
+
let callback;
|
| 66 |
+
|
| 67 |
+
if (typeof arg1 === "string" || arg1 instanceof URL) {
|
| 68 |
+
const url = typeof arg1 === "string" ? new URL(arg1) : arg1;
|
| 69 |
+
options = {
|
| 70 |
+
protocol: url.protocol,
|
| 71 |
+
hostname: url.hostname,
|
| 72 |
+
port: url.port,
|
| 73 |
+
path: url.pathname + url.search,
|
| 74 |
+
};
|
| 75 |
+
if (typeof arg2 === "object" && arg2 !== null) {
|
| 76 |
+
options = { ...options, ...arg2 };
|
| 77 |
+
callback = arg3;
|
| 78 |
+
} else {
|
| 79 |
+
callback = arg2;
|
| 80 |
+
}
|
| 81 |
+
} else {
|
| 82 |
+
options = { ...arg1 };
|
| 83 |
+
callback = arg2;
|
| 84 |
+
}
|
| 85 |
+
|
| 86 |
+
const hostname =
|
| 87 |
+
options.hostname ||
|
| 88 |
+
(options.host ? String(options.host).split(":")[0] : "");
|
| 89 |
+
const path = options.path || "/";
|
| 90 |
+
const headers = options.headers || {};
|
| 91 |
+
|
| 92 |
+
const shouldProxy = shouldProxyHost(hostname);
|
| 93 |
+
const alreadyProxied = options._proxied;
|
| 94 |
+
const hasTargetHeader =
|
| 95 |
+
headers["x-target-host"] || headers["X-Target-Host"];
|
| 96 |
+
|
| 97 |
+
if (shouldProxy && !alreadyProxied && !hasTargetHeader) {
|
| 98 |
+
if (DEBUG) {
|
| 99 |
+
log(
|
| 100 |
+
`[cloudflare-proxy] Redirecting ${originalModuleName}://${hostname}${path} -> ${proxy.hostname}`,
|
| 101 |
+
);
|
| 102 |
+
}
|
| 103 |
+
|
| 104 |
+
const newOptions = { ...options };
|
| 105 |
+
newOptions._proxied = true;
|
| 106 |
+
newOptions.protocol = "https:";
|
| 107 |
+
newOptions.hostname = proxy.hostname;
|
| 108 |
+
newOptions.port = proxy.port || 443;
|
| 109 |
+
newOptions.servername = proxy.hostname;
|
| 110 |
+
delete newOptions.host;
|
| 111 |
+
delete newOptions.agent;
|
| 112 |
+
|
| 113 |
+
newOptions.headers = {
|
| 114 |
+
...(options.headers || {}),
|
| 115 |
+
host: proxy.host,
|
| 116 |
+
"x-target-host": hostname,
|
| 117 |
+
};
|
| 118 |
+
|
| 119 |
+
if (PROXY_SHARED_SECRET) {
|
| 120 |
+
newOptions.headers["x-proxy-key"] = PROXY_SHARED_SECRET;
|
| 121 |
+
}
|
| 122 |
+
|
| 123 |
+
return originalHttpsRequest.call(https, newOptions, callback);
|
| 124 |
+
}
|
| 125 |
+
|
| 126 |
+
return original.call(this, arg1, arg2, arg3);
|
| 127 |
+
};
|
| 128 |
+
};
|
| 129 |
+
|
| 130 |
+
https.request = patch(originalHttpsRequest, "https");
|
| 131 |
+
http.request = patch(originalHttpRequest, "http");
|
| 132 |
+
|
| 133 |
+
if (originalFetch) {
|
| 134 |
+
globalThis.fetch = async function patchedFetch(input, init) {
|
| 135 |
+
const request = input instanceof Request ? input : null;
|
| 136 |
+
const urlStr = request ? request.url : String(input);
|
| 137 |
+
|
| 138 |
+
let url;
|
| 139 |
+
try {
|
| 140 |
+
url = new URL(urlStr);
|
| 141 |
+
} catch (e) {
|
| 142 |
+
return originalFetch(input, init);
|
| 143 |
+
}
|
| 144 |
+
|
| 145 |
+
const hostname = url.hostname;
|
| 146 |
+
const shouldProxy = shouldProxyHost(hostname);
|
| 147 |
+
|
| 148 |
+
let mergedHeaders;
|
| 149 |
+
if (request) {
|
| 150 |
+
mergedHeaders = new Headers(request.headers);
|
| 151 |
+
} else {
|
| 152 |
+
mergedHeaders = new Headers(init?.headers || {});
|
| 153 |
+
}
|
| 154 |
+
|
| 155 |
+
const alreadyProxied =
|
| 156 |
+
mergedHeaders.has("x-target-host") || mergedHeaders.has("X-Target-Host");
|
| 157 |
+
|
| 158 |
+
if (!shouldProxy || alreadyProxied) {
|
| 159 |
+
return originalFetch(input, init);
|
| 160 |
+
}
|
| 161 |
+
|
| 162 |
+
if (DEBUG) {
|
| 163 |
+
log(
|
| 164 |
+
`[cloudflare-proxy] Redirecting fetch://${hostname}${url.pathname}${url.search} -> ${proxy.hostname}`,
|
| 165 |
+
);
|
| 166 |
+
}
|
| 167 |
+
|
| 168 |
+
mergedHeaders.set("x-target-host", hostname);
|
| 169 |
+
if (PROXY_SHARED_SECRET) {
|
| 170 |
+
mergedHeaders.set("x-proxy-key", PROXY_SHARED_SECRET);
|
| 171 |
+
}
|
| 172 |
+
|
| 173 |
+
const proxiedUrl = new URL(url.pathname + url.search, proxy);
|
| 174 |
+
|
| 175 |
+
const logProxyError = (promise, debugInfo) => {
|
| 176 |
+
promise
|
| 177 |
+
.then(r => {
|
| 178 |
+
if (DEBUG && !r.ok) {
|
| 179 |
+
log(`[cloudflare-proxy] Proxy HTTP ${r.status} for ${hostname}: ${r.statusText}`);
|
| 180 |
+
}
|
| 181 |
+
})
|
| 182 |
+
.catch(err => {
|
| 183 |
+
const cause = err?.cause;
|
| 184 |
+
const causeStr = cause
|
| 185 |
+
? ` | cause: ${cause?.code || cause?.message || String(cause)}`
|
| 186 |
+
: "";
|
| 187 |
+
log(`[cloudflare-proxy] Proxy FAILED ${hostname}: ${err?.message}${causeStr}`);
|
| 188 |
+
if (DEBUG && debugInfo) {
|
| 189 |
+
log(`[cloudflare-proxy] Debug: ${debugInfo}`);
|
| 190 |
+
}
|
| 191 |
+
});
|
| 192 |
+
return promise;
|
| 193 |
+
};
|
| 194 |
+
|
| 195 |
+
if (request) {
|
| 196 |
+
const fetchOpts = {
|
| 197 |
+
method: request.method,
|
| 198 |
+
headers: mergedHeaders,
|
| 199 |
+
redirect: request.redirect,
|
| 200 |
+
};
|
| 201 |
+
if (request.body) {
|
| 202 |
+
fetchOpts.body = request.body;
|
| 203 |
+
fetchOpts.duplex = "half";
|
| 204 |
+
}
|
| 205 |
+
return logProxyError(
|
| 206 |
+
originalFetch(String(proxiedUrl), fetchOpts),
|
| 207 |
+
`request-mode method=${request.method} hasBody=${!!request.body}`,
|
| 208 |
+
);
|
| 209 |
+
}
|
| 210 |
+
|
| 211 |
+
// Build a fresh init: do NOT spread `init` because it may carry a
|
| 212 |
+
// `dispatcher`/`client` pinned to the original target's connection
|
| 213 |
+
// pool, which causes undici to throw UND_ERR_INVALID_ARG when we
|
| 214 |
+
// change the origin. Forward only well-known fetch options.
|
| 215 |
+
const newInit = {
|
| 216 |
+
method: init?.method || "GET",
|
| 217 |
+
headers: mergedHeaders,
|
| 218 |
+
};
|
| 219 |
+
if (init?.body != null) {
|
| 220 |
+
newInit.body = init.body;
|
| 221 |
+
if (init.body instanceof ReadableStream) {
|
| 222 |
+
newInit.duplex = init.duplex || "half";
|
| 223 |
+
}
|
| 224 |
+
}
|
| 225 |
+
if (init?.signal) newInit.signal = init.signal;
|
| 226 |
+
if (init?.redirect) newInit.redirect = init.redirect;
|
| 227 |
+
if (init?.credentials) newInit.credentials = init.credentials;
|
| 228 |
+
if (init?.cache) newInit.cache = init.cache;
|
| 229 |
+
if (init?.mode) newInit.mode = init.mode;
|
| 230 |
+
if (init?.referrer) newInit.referrer = init.referrer;
|
| 231 |
+
if (init?.referrerPolicy) newInit.referrerPolicy = init.referrerPolicy;
|
| 232 |
+
if (init?.integrity) newInit.integrity = init.integrity;
|
| 233 |
+
if (init?.keepalive != null) newInit.keepalive = init.keepalive;
|
| 234 |
+
|
| 235 |
+
const bodyType = init?.body == null
|
| 236 |
+
? "none"
|
| 237 |
+
: init.body instanceof ReadableStream
|
| 238 |
+
? "ReadableStream"
|
| 239 |
+
: (init.body?.constructor?.name || typeof init.body);
|
| 240 |
+
|
| 241 |
+
return logProxyError(
|
| 242 |
+
originalFetch(String(proxiedUrl), newInit),
|
| 243 |
+
`init-mode method=${newInit.method} body=${bodyType} initKeys=${Object.keys(init || {}).join(",")}`,
|
| 244 |
+
);
|
| 245 |
+
};
|
| 246 |
+
}
|
| 247 |
+
|
| 248 |
+
// undici patching
|
| 249 |
+
const patchUndiciInstance = (exports) => {
|
| 250 |
+
if (!exports) return;
|
| 251 |
+
|
| 252 |
+
const patchDispatch = (proto, name) => {
|
| 253 |
+
if (proto && proto.dispatch && !proto.dispatch._patched) {
|
| 254 |
+
const origDispatch = proto.dispatch;
|
| 255 |
+
proto.dispatch = function(options, handler) {
|
| 256 |
+
let origin = options.origin || this.origin;
|
| 257 |
+
if (origin && typeof origin !== 'string') {
|
| 258 |
+
try { origin = origin.origin || origin.toString(); } catch (e) { origin = ""; }
|
| 259 |
+
}
|
| 260 |
+
|
| 261 |
+
let hostname = "";
|
| 262 |
+
try {
|
| 263 |
+
hostname = new URL(String(origin)).hostname;
|
| 264 |
+
} catch(e) {
|
| 265 |
+
hostname = String(origin || "").split(':')[0];
|
| 266 |
+
}
|
| 267 |
+
|
| 268 |
+
if (hostname && shouldProxyHost(hostname)) {
|
| 269 |
+
if (DEBUG) log(`[cloudflare-proxy] Redirecting undici ${name}.dispatch: ${hostname}${options.path || ""} -> ${proxy.hostname}`);
|
| 270 |
+
|
| 271 |
+
const targetHeader = "x-target-host";
|
| 272 |
+
const secretHeader = "x-proxy-key";
|
| 273 |
+
|
| 274 |
+
if (Array.isArray(options.headers)) {
|
| 275 |
+
let foundTarget = false;
|
| 276 |
+
for (let i = 0; i < options.headers.length; i += 2) {
|
| 277 |
+
if (String(options.headers[i]).toLowerCase() === targetHeader) {
|
| 278 |
+
foundTarget = true;
|
| 279 |
+
break;
|
| 280 |
+
}
|
| 281 |
+
}
|
| 282 |
+
if (!foundTarget) {
|
| 283 |
+
options.headers.push(targetHeader, hostname);
|
| 284 |
+
if (PROXY_SHARED_SECRET) options.headers.push(secretHeader, PROXY_SHARED_SECRET);
|
| 285 |
+
}
|
| 286 |
+
} else {
|
| 287 |
+
options.headers = options.headers || {};
|
| 288 |
+
if (options.headers instanceof Map || (typeof options.headers.set === 'function')) {
|
| 289 |
+
options.headers.set(targetHeader, hostname);
|
| 290 |
+
if (PROXY_SHARED_SECRET) options.headers.set(secretHeader, PROXY_SHARED_SECRET);
|
| 291 |
+
} else {
|
| 292 |
+
options.headers[targetHeader] = hostname;
|
| 293 |
+
if (PROXY_SHARED_SECRET) options.headers[secretHeader] = PROXY_SHARED_SECRET;
|
| 294 |
+
}
|
| 295 |
+
}
|
| 296 |
+
options.origin = `https://${proxy.hostname}`;
|
| 297 |
+
}
|
| 298 |
+
return origDispatch.call(this, options, handler);
|
| 299 |
+
};
|
| 300 |
+
proto.dispatch._patched = true;
|
| 301 |
+
}
|
| 302 |
+
};
|
| 303 |
+
|
| 304 |
+
for (const key in exports) {
|
| 305 |
+
if (exports[key] && exports[key].prototype && typeof exports[key].prototype.dispatch === 'function') {
|
| 306 |
+
patchDispatch(exports[key].prototype, key);
|
| 307 |
+
}
|
| 308 |
+
}
|
| 309 |
+
|
| 310 |
+
if (exports.getGlobalDispatcher) {
|
| 311 |
+
try {
|
| 312 |
+
const globalDispatcher = exports.getGlobalDispatcher();
|
| 313 |
+
if (globalDispatcher && globalDispatcher.dispatch && !globalDispatcher.dispatch._patched) {
|
| 314 |
+
patchDispatch(globalDispatcher, "GlobalDispatcherInstance");
|
| 315 |
+
}
|
| 316 |
+
} catch (e) {}
|
| 317 |
+
}
|
| 318 |
+
|
| 319 |
+
// Also patch Agent and other potentially unexported classes if they have dispatch
|
| 320 |
+
if (exports.Agent && exports.Agent.prototype) patchDispatch(exports.Agent.prototype, "Agent");
|
| 321 |
+
if (exports.Pool && exports.Pool.prototype) patchDispatch(exports.Pool.prototype, "Pool");
|
| 322 |
+
if (exports.Client && exports.Client.prototype) patchDispatch(exports.Client.prototype, "Client");
|
| 323 |
+
|
| 324 |
+
if (exports.fetch && !exports.fetch._patched) {
|
| 325 |
+
const origFetch = exports.fetch;
|
| 326 |
+
exports.fetch = async function (input, init) {
|
| 327 |
+
// If we are calling undici.fetch, it should use our globalThis.fetch which is patched
|
| 328 |
+
return globalThis.fetch(input, init);
|
| 329 |
+
};
|
| 330 |
+
exports.fetch._patched = true;
|
| 331 |
+
}
|
| 332 |
+
};
|
| 333 |
+
|
| 334 |
+
// Try to require undici immediately
|
| 335 |
+
try {
|
| 336 |
+
const undici = require("undici");
|
| 337 |
+
patchUndiciInstance(undici);
|
| 338 |
+
} catch (e) {}
|
| 339 |
+
|
| 340 |
+
// Hook require() to patch any undici instance the moment it loads.
|
| 341 |
+
// Match either the bare "undici" id or paths whose final package
|
| 342 |
+
// segment IS undici (e.g. "/foo/node_modules/undici/index.js"). The
|
| 343 |
+
// earlier substring check `id.includes("/undici/")` would also match
|
| 344 |
+
// unrelated packages like "super-undici-x".
|
| 345 |
+
const Module = require("module");
|
| 346 |
+
const originalRequire = Module.prototype.require;
|
| 347 |
+
const UNDICI_PATH_RE = /(?:^|\/)node_modules\/undici(?:\/|$)/;
|
| 348 |
+
Module.prototype.require = function (id) {
|
| 349 |
+
const exports = originalRequire.apply(this, arguments);
|
| 350 |
+
if (id === "undici" || UNDICI_PATH_RE.test(id)) {
|
| 351 |
+
try { patchUndiciInstance(exports); } catch (e) {}
|
| 352 |
+
}
|
| 353 |
+
return exports;
|
| 354 |
+
};
|
| 355 |
+
|
| 356 |
+
// Startup banner: print once across all Node spawns. Use a file marker
|
| 357 |
+
// because every Node process (health-server, gateway, sync subprocess)
|
| 358 |
+
// is spawned fresh from bash with NODE_OPTIONS=--require, so an env-var
|
| 359 |
+
// marker won't propagate. /tmp is per-container so it resets on rebuild.
|
| 360 |
+
if (DEBUG) {
|
| 361 |
+
try {
|
| 362 |
+
require("fs").writeFileSync("/tmp/.cf-proxy-banner-shown", "1", {
|
| 363 |
+
flag: "wx",
|
| 364 |
+
});
|
| 365 |
+
log(
|
| 366 |
+
`[cloudflare-proxy] active (${PROXY_ALL ? "wildcard" : "list"}) -> ${proxy.hostname}`,
|
| 367 |
+
);
|
| 368 |
+
} catch (_) {
|
| 369 |
+
// marker exists — banner already shown by another process
|
| 370 |
+
}
|
| 371 |
+
}
|
| 372 |
+
} catch (error) {
|
| 373 |
+
log(`[cloudflare-proxy] Failed to initialize: ${error.message}`);
|
| 374 |
+
}
|
| 375 |
+
}
|
cloudflare-worker.js
ADDED
|
@@ -0,0 +1,103 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/**
|
| 2 |
+
* Cloudflare Worker: Universal Outbound Proxy
|
| 3 |
+
*
|
| 4 |
+
* Manual setup:
|
| 5 |
+
* 1. Create a Cloudflare Worker.
|
| 6 |
+
* 2. Paste this file and deploy it.
|
| 7 |
+
* 3. Use the worker URL as CLOUDFLARE_PROXY_URL.
|
| 8 |
+
*
|
| 9 |
+
* Optional worker vars:
|
| 10 |
+
* - PROXY_SHARED_SECRET
|
| 11 |
+
* - ALLOWED_TARGETS
|
| 12 |
+
* - ALLOW_PROXY_ALL
|
| 13 |
+
*/
|
| 14 |
+
|
| 15 |
+
function normalizeList(raw) {
|
| 16 |
+
return String(raw || "")
|
| 17 |
+
.split(",")
|
| 18 |
+
.map((value) => value.trim().toLowerCase())
|
| 19 |
+
.filter(Boolean);
|
| 20 |
+
}
|
| 21 |
+
|
| 22 |
+
export default {
  // Proxy an incoming request to an upstream host. Target selection:
  // explicit x-target-host header / ?proxy_target= query param, or an
  // implicit Telegram Bot API default when the path starts with /bot.
  async fetch(request, env) {
    const url = new URL(request.url);
    const queryTarget = url.searchParams.get("proxy_target");
    const targetHost = request.headers.get("x-target-host") || queryTarget;
    // Shared secret may come from either env var name (legacy alias).
    const proxySecret = (
      env.PROXY_SHARED_SECRET ||
      env.CLOUDFLARE_PROXY_SECRET ||
      ""
    ).trim();

    if (proxySecret) {
      // NOTE(review): plain !== comparison is not constant-time; for a
      // shared-secret header this is a minor timing-oracle concern.
      const providedSecret = request.headers.get("x-proxy-key") || url.searchParams.get("proxy_key") || "";
      if (providedSecret !== proxySecret) {
        // Fallback: allow Telegram requests via path without secret if it looks like a bot API call.
        // This is safe because it only proxies to api.telegram.org.
        if (url.pathname.startsWith("/bot") && !targetHost) {
          // Allowed
        } else {
          return new Response("Unauthorized: Invalid proxy key", { status: 401 });
        }
      }
    }

    // ALLOW_PROXY_ALL defaults to true, which makes this an open proxy to
    // any host (subject only to the optional shared secret above).
    const allowProxyAll =
      String(env.ALLOW_PROXY_ALL || "true").toLowerCase() === "true";
    const allowedTargets = normalizeList(
      env.ALLOWED_TARGETS || "api.telegram.org,discord.com,discordapp.com,gateway.discord.gg,status.discord.com,web.whatsapp.com,graph.facebook.com,googleapis.com,google.com,googleusercontent.com,gstatic.com",
    );

    // A host is allowed if it equals an allow-list entry or is a subdomain
    // of one (suffix match on ".domain").
    const isAllowedHost = (hostname) => {
      const normalized = String(hostname || "")
        .trim()
        .toLowerCase();
      if (!normalized) return false;
      if (allowProxyAll) return true;
      return allowedTargets.some(
        (domain) => normalized === domain || normalized.endsWith(`.${domain}`),
      );
    };

    let targetBase = "";
    if (targetHost) {
      if (!isAllowedHost(targetHost)) {
        return new Response(`Forbidden: Host ${targetHost} is not allowed.`, { status: 403 });
      }
      targetBase = `https://${targetHost}`;
    } else if (url.pathname.startsWith("/bot")) {
      // Telegram Bot API convenience default (paths look like /bot<token>/...).
      targetBase = "https://api.telegram.org";
    } else {
      return new Response("Invalid request: No target host provided.", { status: 400 });
    }

    // Strip proxy-control params so they are not forwarded upstream.
    const cleanSearch = new URLSearchParams(url.search);
    cleanSearch.delete("proxy_target");
    cleanSearch.delete("proxy_key");
    const searchStr = cleanSearch.toString();
    const targetUrl = targetBase + url.pathname + (searchStr ? `?${searchStr}` : "");

    // Drop Cloudflare- and proxy-identifying headers before forwarding.
    const headers = new Headers(request.headers);
    headers.delete("cf-connecting-ip");
    headers.delete("cf-ray");
    headers.delete("cf-visitor");
    headers.delete("host");
    headers.delete("x-real-ip");
    headers.delete("x-target-host");
    headers.delete("x-proxy-key");

    const proxiedRequest = new Request(targetUrl, {
      method: request.method,
      headers,
      body: request.body,
      redirect: "follow",
    });

    try {
      return await fetch(proxiedRequest);
    } catch (error) {
      // Upstream fetch failure (DNS, TLS, timeout) surfaces as 502.
      return new Response(`Proxy Error: ${error.message}`, { status: 502 });
    }
  },
};
|
docker-compose.yml
ADDED
|
@@ -0,0 +1,54 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Local-dev convenience. For HF Space deployment, the Dockerfile alone is
# enough — HF runs the container with auto-injected SPACE_HOST.
#
# Usage:
#   cp .env.example .env   # add HF_TOKEN
#   docker compose up --build
#   open http://localhost:7860/

services:
  huggingpost:
    build:
      context: .
      dockerfile: Dockerfile
    container_name: huggingpost
    environment:
      # HF backup (optional for local testing, but lets you smoke-test the
      # restore path).
      HF_TOKEN: ${HF_TOKEN:-}
      HF_USERNAME: ${HF_USERNAME:-}
      SYNC_INTERVAL: "180"
      BACKUP_DATASET_NAME: huggingpost-backup-dev

      # Public URL override (no SPACE_HOST when running locally)
      FRONTEND_URL: http://localhost:7860
      NEXT_PUBLIC_BACKEND_URL: http://localhost:7860/api
      BACKEND_INTERNAL_URL: http://localhost:3000

      # Storage
      STORAGE_PROVIDER: local

      # Cloudflare proxy (optional)
      # CLOUDFLARE_WORKERS_TOKEN: ${CLOUDFLARE_WORKERS_TOKEN:-}

    ports:
      - "7860:7860" # public — dashboard + reverse proxy
      - "3000:3000" # direct backend access (dev only)
      - "4200:4200" # direct frontend access (dev only)
    volumes:
      - postiz_data:/postiz
      # Hot-reload of orchestration scripts during local dev (rebuild image
      # when Dockerfile / postiz tree changes):
      - ./start.sh:/app/start.sh
      - ./health-server.js:/app/health-server.js
      - ./postiz-sync.py:/app/postiz-sync.py
    healthcheck:
      # /health is served by health-server.js on the public port.
      test: ["CMD", "curl", "-f", "http://localhost:7860/health"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 180s

volumes:
  postiz_data:
    driver: local
|
health-server.js
ADDED
|
@@ -0,0 +1,833 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Single public entrypoint for HF Spaces: HuggingPost dashboard + reverse
|
| 2 |
+
// proxy to Postiz (which lives behind the container's internal nginx on
|
| 3 |
+
// port 5000 — that nginx routes /api → backend, /uploads → file system,
|
| 4 |
+
// / → frontend).
|
| 5 |
+
//
|
| 6 |
+
// Routing rules (in order):
|
| 7 |
+
// /health, /status, /uptimerobot/setup → handled here
|
| 8 |
+
// / (exact) → HuggingPost dashboard HTML
|
| 9 |
+
// /app or /app/* → Postiz (nginx :5000), /app prefix stripped
|
| 10 |
+
// /_next/* or /static/* → 301 redirect to /app/<same path>
|
| 11 |
+
// (catches asset URLs Next.js may emit
|
| 12 |
+
// without basePath in edge cases)
|
| 13 |
+
// anything else → 404
|
| 14 |
+
//
|
| 15 |
+
// Why strip /app: the Postiz frontend is built with basePath="/app" so it
|
| 16 |
+
// emits asset URLs prefixed with /app. The browser sends /app/_next/foo to
|
| 17 |
+
// us; we strip /app and forward /_next/foo to nginx :5000, which forwards
|
| 18 |
+
// to Next.js on :4200. nginx's own routes (/api, /uploads, /) are also
|
| 19 |
+
// reached after we strip the /app prefix.
|
| 20 |
+
|
| 21 |
+
const http = require("http");
const https = require("https");
const fs = require("fs");
const net = require("net");

// Public port exposed by the HF Space; Postiz's internal nginx listens on 5000.
const PORT = 7860;
const POSTIZ_HOST = "127.0.0.1";
const POSTIZ_PORT = 5000;

// Process start timestamp, used for the dashboard uptime display.
const startTime = Date.now();
// Backups are on iff an HF write token is configured.
const HF_BACKUP_ENABLED = !!process.env.HF_TOKEN;
// Kept as a string — only interpolated into status messages, never parsed here.
const SYNC_INTERVAL = process.env.SYNC_INTERVAL || "300";
// The UptimeRobot setup endpoint can be disabled via env; defaults to on.
const UPTIMEROBOT_SETUP_ENABLED =
  String(process.env.UPTIMEROBOT_SETUP_ENABLED || "true").toLowerCase() === "true";
// Sliding-window rate limit for /uptimerobot/setup: max N requests per minute per IP.
const UPTIMEROBOT_RATE_WINDOW_MS = 60 * 1000;
const UPTIMEROBOT_RATE_MAX = Number(process.env.UPTIMEROBOT_RATE_LIMIT_PER_MINUTE || 5);
// Space public/private visibility lookups are cached for 10 minutes.
const SPACE_VISIBILITY_TTL_MS = 10 * 60 * 1000;
// cacheKey "owner/repo" -> { isPrivate, timestamp }
const spaceVisibilityCache = new Map();
// requester IP -> array of recent request timestamps (ms)
const uptimerobotRateMap = new Map();
|
| 40 |
+
|
| 41 |
+
// ============================================================================
|
| 42 |
+
// URL helpers
|
| 43 |
+
// ============================================================================
|
| 44 |
+
|
| 45 |
+
// Parse a request's URL (usually a bare path) into a WHATWG URL object,
// resolving against a dummy localhost base. Unparseable input falls back
// to the root path instead of throwing.
function parseRequestUrl(url) {
  try {
    return new URL(url, "http://localhost");
  } catch (_err) {
    return new URL("http://localhost/");
  }
}
|
| 49 |
+
|
| 50 |
+
// Paths served directly by this process; everything else is proxied
// to Postiz or rejected.
function isLocalRoute(pathname) {
  const localPaths = ["/health", "/status", "/uptimerobot/setup", "/", ""];
  return localPaths.includes(pathname);
}
|
| 59 |
+
|
| 60 |
+
// ============================================================================
|
| 61 |
+
// UptimeRobot helpers
|
| 62 |
+
// ============================================================================
|
| 63 |
+
|
| 64 |
+
// Best-effort client IP: first entry of x-forwarded-for when present
// (string or array form), otherwise the raw socket peer address.
function getRequesterIp(req) {
  const forwarded = req.headers["x-forwarded-for"];
  let headerValue;
  if (typeof forwarded === "string") {
    headerValue = forwarded;
  } else if (Array.isArray(forwarded) && forwarded.length > 0) {
    headerValue = String(forwarded[0]);
  }
  if (headerValue !== undefined) {
    return headerValue.split(",")[0].trim();
  }
  return req.socket.remoteAddress || "unknown";
}
|
| 70 |
+
|
| 71 |
+
// Sliding-window rate limiter keyed by requester IP. Records the current
// request, prunes entries older than the window, and reports whether the
// caller has exceeded UPTIMEROBOT_RATE_MAX requests in the last minute.
// State lives in the module-level uptimerobotRateMap.
function isRateLimited(req) {
  const nowMs = Date.now();
  const requester = getRequesterIp(req);
  const previous = uptimerobotRateMap.get(requester) || [];
  const windowed = previous.filter((stamp) => nowMs - stamp < UPTIMEROBOT_RATE_WINDOW_MS);
  windowed.push(nowMs);
  uptimerobotRateMap.set(requester, windowed);
  return windowed.length > UPTIMEROBOT_RATE_MAX;
}
|
| 80 |
+
|
| 81 |
+
// Housekeeping: every 5 minutes, drop IPs whose recorded timestamps have
// all aged out of the rate-limit window so the map cannot grow without
// bound. unref() keeps this timer from holding the process open at exit.
setInterval(() => {
  const cutoff = Date.now() - UPTIMEROBOT_RATE_WINDOW_MS;
  for (const [ip, timestamps] of uptimerobotRateMap) {
    if (timestamps.every((ts) => ts < cutoff)) uptimerobotRateMap.delete(ip);
  }
}, 5 * 60 * 1000).unref();
|
| 87 |
+
|
| 88 |
+
// Loose same-origin check for the setup endpoint: a Host header must be
// present, and if Origin/Referer are supplied they must mention that host.
// Absent Origin/Referer are accepted (non-browser clients).
function isAllowedUptimeSetupOrigin(req) {
  const host = String(req.headers.host || "").toLowerCase();
  if (!host) return false;
  const origin = String(req.headers.origin || "").toLowerCase();
  const referer = String(req.headers.referer || "").toLowerCase();
  const mentionsHost = (value) => !value || value.includes(host);
  return mentionsHost(origin) && mentionsHost(referer);
}
|
| 97 |
+
|
| 98 |
+
// Shape check only (no API call): 20–128 characters from the URL-safe
// alphabet [A-Za-z0-9_-]. Anything else — including null — is rejected.
function isValidUptimeApiKey(key) {
  const keyShape = /^[A-Za-z0-9_-]{20,128}$/;
  return keyShape.test(String(key || ""));
}
|
| 101 |
+
|
| 102 |
+
// Decode (WITHOUT verifying the signature) the payload segment of a JWT.
// Handles base64url alphabet and missing padding. Returns the parsed
// object, or null on any malformed input.
function decodeJwtPayload(token) {
  try {
    const segments = String(token || "").split(".");
    if (segments.length < 2) return null;
    let b64 = segments[1].replace(/-/g, "+").replace(/_/g, "/");
    while (b64.length % 4 !== 0) b64 += "=";
    const json = Buffer.from(b64, "base64").toString("utf8");
    return JSON.parse(json);
  } catch (_err) {
    return null;
  }
}
|
| 111 |
+
|
| 112 |
+
// Extract { owner, repo } for this Space from the HF-injected "__sign"
// query parameter (a JWT whose sub claim is "/spaces/<owner>/<repo>").
// Returns null when the token is absent or does not match that shape.
function getSpaceRef(parsedUrl) {
  const token = parsedUrl.searchParams.get("__sign");
  if (!token) return null;
  const claims = decodeJwtPayload(token);
  const sub = claims && claims.sub;
  if (typeof sub !== "string") return null;
  const parts = sub.match(/^\/spaces\/([^/]+)\/([^/]+)$/);
  if (!parts) return null;
  return { owner: parts[1], repo: parts[2] };
}
|
| 123 |
+
|
| 124 |
+
// GET the given HTTPS URL and resolve with only its status code (body is
// drained and discarded). Rejects on network error or a 5s timeout.
function fetchStatusCode(url) {
  return new Promise((resolve, reject) => {
    const req = https.get(
      url,
      { headers: { "user-agent": "HuggingPost/1.0", accept: "application/json" } },
      // res.resume() drains the body so the socket is released.
      (res) => { res.resume(); resolve(res.statusCode || 0); },
    );
    req.on("error", reject);
    // destroy(err) surfaces the timeout through the 'error' handler above.
    req.setTimeout(5000, () => req.destroy(new Error("timeout")));
  });
}
|
| 135 |
+
|
| 136 |
+
// Infer whether this Space is private by probing the public HF API for it:
// 401/403/404 on the unauthenticated endpoint is treated as "private".
// Results are cached per owner/repo for SPACE_VISIBILITY_TTL_MS; on probe
// failure we fall back to the stale cached answer, else assume public.
async function resolveSpaceIsPrivate(parsedUrl) {
  const ref = getSpaceRef(parsedUrl);
  if (!ref) return false;
  const cacheKey = `${ref.owner}/${ref.repo}`;
  const cached = spaceVisibilityCache.get(cacheKey);
  if (cached && Date.now() - cached.timestamp < SPACE_VISIBILITY_TTL_MS) return cached.isPrivate;
  try {
    const statusCode = await fetchStatusCode(`https://huggingface.co/api/spaces/${ref.owner}/${ref.repo}`);
    // 404 also counts: private Spaces are invisible to anonymous callers.
    const isPrivate = statusCode === 401 || statusCode === 403 || statusCode === 404;
    spaceVisibilityCache.set(cacheKey, { isPrivate, timestamp: Date.now() });
    return isPrivate;
  } catch {
    if (cached) return cached.isPrivate;
    return false;
  }
}
|
| 152 |
+
|
| 153 |
+
// POST a form-encoded request to the UptimeRobot API and resolve with the
// parsed JSON body. Rejects on network error or a non-JSON response.
function postUptimeRobot(path, form) {
  const body = new URLSearchParams(form).toString();
  return new Promise((resolve, reject) => {
    const request = https.request(
      {
        hostname: "api.uptimerobot.com", port: 443, method: "POST", path,
        headers: { "Content-Type": "application/x-www-form-urlencoded", "Content-Length": Buffer.byteLength(body) },
      },
      (response) => {
        let raw = "";
        response.setEncoding("utf8");
        response.on("data", (c) => { raw += c; });
        response.on("end", () => {
          try { resolve(JSON.parse(raw)); }
          catch { reject(new Error("Unexpected response from UptimeRobot")); }
        });
      },
    );
    request.on("error", reject);
    request.write(body);
    request.end();
  });
}
|
| 176 |
+
|
| 177 |
+
// Idempotently create an UptimeRobot HTTP monitor for this Space's
// /health endpoint.
//
// Params:  apiKey — UptimeRobot Main API key; host — Space host, with or
//          without scheme/path (normalized to bare hostname).
// Returns: { created: boolean, message: string }.
// Throws:  Error when the host is missing or either API call fails.
async function createUptimeRobotMonitor(apiKey, host) {
  const cleanHost = String(host || "").replace(/^https?:\/\//, "").replace(/\/.*$/, "");
  if (!cleanHost) throw new Error("Missing Space host.");
  const monitorUrl = `https://${cleanHost}/health`;
  const existing = await postUptimeRobot("/v2/getMonitors", {
    api_key: apiKey, format: "json", logs: "0", response_times: "0", response_times_limit: "1",
  });
  // Fix: surface getMonitors failures (e.g. read-only or invalid API key)
  // here. Previously a failed query was treated as "no monitors" and we
  // fell through to newMonitor, producing a confusing secondary error.
  if (existing.stat !== "ok") {
    const queryError = existing?.error?.message || existing?.message || "Failed to query UptimeRobot monitors.";
    throw new Error(queryError);
  }
  const existingMonitor = Array.isArray(existing.monitors)
    ? existing.monitors.find((m) => m.url === monitorUrl) : null;
  if (existingMonitor) return { created: false, message: `Monitor already exists for ${monitorUrl}` };
  // type "1" = HTTP(s) monitor; interval "300" = check every 5 minutes.
  const created = await postUptimeRobot("/v2/newMonitor", {
    api_key: apiKey, format: "json", type: "1",
    friendly_name: `HuggingPost ${cleanHost}`,
    url: monitorUrl, interval: "300",
  });
  if (created.stat !== "ok") {
    const message = created?.error?.message || created?.message || "Failed to create UptimeRobot monitor.";
    throw new Error(message);
  }
  return { created: true, message: `Monitor created for ${monitorUrl}` };
}
|
| 198 |
+
|
| 199 |
+
// ============================================================================
|
| 200 |
+
// Status helpers
|
| 201 |
+
// ============================================================================
|
| 202 |
+
|
| 203 |
+
// Read the backup-sync status written by postiz-sync.py to /tmp. If the
// file is absent or unreadable (best-effort: errors are swallowed), return
// a synthetic "configured, waiting" status when backups are enabled, or a
// bare unknown status otherwise.
function readSyncStatus() {
  try {
    if (fs.existsSync("/tmp/sync-status.json")) {
      return JSON.parse(fs.readFileSync("/tmp/sync-status.json", "utf8"));
    }
  } catch {}
  if (HF_BACKUP_ENABLED) {
    return {
      db_status: "unknown", last_sync_time: null, last_error: null, sync_count: 0,
      status: "configured",
      message: `Backup enabled. Waiting for first sync (every ${SYNC_INTERVAL}s).`,
    };
  }
  return { db_status: "unknown", last_sync_time: null, last_error: null, sync_count: 0 };
}
|
| 218 |
+
|
| 219 |
+
// Probe the internal Postiz nginx (127.0.0.1:5000). Never rejects: resolves
// with { status: "running" | "error" | "unreachable", ... }. Any response
// below 500 counts as running; no response within 5s counts as unreachable.
function checkPostizHealth() {
  return new Promise((resolve) => {
    const timeout = setTimeout(() => resolve({ status: "unreachable", reason: "timeout" }), 5000);
    http.get(`http://${POSTIZ_HOST}:${POSTIZ_PORT}/`, (res) => {
      clearTimeout(timeout);
      resolve({ status: res.statusCode < 500 ? "running" : "error", statusCode: res.statusCode });
      // Drain the body so the keep-alive socket is released.
      res.resume();
    }).on("error", (err) => {
      clearTimeout(timeout);
      resolve({ status: "unreachable", reason: err.message });
    });
  });
}
|
| 232 |
+
|
| 233 |
+
// Render a duration in seconds as "<hours>h <minutes>m"; days roll into
// hours (e.g. 90000s -> "25h 0m").
function formatUptime(seconds) {
  const totalMinutes = Math.floor(seconds / 60);
  const hours = Math.floor(totalMinutes / 60);
  const minutes = totalMinutes % 60;
  return `${hours}h ${minutes}m`;
}
|
| 238 |
+
|
| 239 |
+
// ============================================================================
|
| 240 |
+
// Dashboard HTML
|
| 241 |
+
// ============================================================================
|
| 242 |
+
|
| 243 |
+
function renderDashboard(initialData) {
|
| 244 |
+
const keepAwakeHtml = !UPTIMEROBOT_SETUP_ENABLED
|
| 245 |
+
? `<div class="helper-summary">UptimeRobot setup is disabled for this Space.</div>`
|
| 246 |
+
: initialData.spacePrivate
|
| 247 |
+
? `<div class="helper-summary"><strong>Space is private.</strong> External monitors cannot reach private HF Spaces. Switch to public to use keep-awake.</div>`
|
| 248 |
+
: `
|
| 249 |
+
<div id="uptimerobot-summary" class="helper-summary">
|
| 250 |
+
One-time setup for public Spaces. Paste your UptimeRobot <strong>Main API key</strong> to create the monitor.
|
| 251 |
+
</div>
|
| 252 |
+
<button id="uptimerobot-toggle" class="helper-toggle" type="button">Set Up Monitor</button>
|
| 253 |
+
<div id="uptimerobot-shell" class="helper-shell hidden">
|
| 254 |
+
<div class="helper-copy">
|
| 255 |
+
Do <strong>not</strong> use the Read-only API key or a Monitor-specific API key.
|
| 256 |
+
</div>
|
| 257 |
+
<div class="helper-row">
|
| 258 |
+
<input id="uptimerobot-key" class="helper-input" type="password"
|
| 259 |
+
placeholder="Paste your UptimeRobot Main API key" autocomplete="off" />
|
| 260 |
+
<button id="uptimerobot-btn" class="helper-button" type="button">Create Monitor</button>
|
| 261 |
+
</div>
|
| 262 |
+
<div class="helper-note">One-time setup. Your key is only used to create the monitor for this Space.</div>
|
| 263 |
+
</div>
|
| 264 |
+
<div id="uptimerobot-result" class="helper-result"></div>`;
|
| 265 |
+
|
| 266 |
+
const syncStatus = initialData.sync;
|
| 267 |
+
const hasBackup = HF_BACKUP_ENABLED;
|
| 268 |
+
const lastSync = syncStatus.last_sync_time ? new Date(syncStatus.last_sync_time).toLocaleString() : "Never";
|
| 269 |
+
const syncError = syncStatus.last_error || null;
|
| 270 |
+
|
| 271 |
+
const syncBadge = !hasBackup
|
| 272 |
+
? `<div class="status-badge status-offline">Disabled</div>`
|
| 273 |
+
: syncError
|
| 274 |
+
? `<div class="status-badge status-error">Error</div>`
|
| 275 |
+
: syncStatus.last_sync_time
|
| 276 |
+
? `<div class="status-badge status-online"><div class="pulse"></div>Enabled</div>`
|
| 277 |
+
: `<div class="status-badge status-syncing"><div class="pulse" style="background:#3b82f6"></div>Pending</div>`;
|
| 278 |
+
|
| 279 |
+
const postizBadge = initialData.postizRunning
|
| 280 |
+
? `<div class="status-badge status-online"><div class="pulse"></div>Running</div>`
|
| 281 |
+
: `<div class="status-badge status-offline">Booting</div>`;
|
| 282 |
+
|
| 283 |
+
return `<!DOCTYPE html>
|
| 284 |
+
<html lang="en">
|
| 285 |
+
<head>
|
| 286 |
+
<meta charset="UTF-8">
|
| 287 |
+
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
| 288 |
+
<title>HuggingPost Dashboard</title>
|
| 289 |
+
<link href="https://fonts.googleapis.com/css2?family=Outfit:wght@300;400;600&display=swap" rel="stylesheet">
|
| 290 |
+
<style>
|
| 291 |
+
:root {
|
| 292 |
+
--bg: #0f172a;
|
| 293 |
+
--card-bg: rgba(30, 41, 59, 0.7);
|
| 294 |
+
--accent: linear-gradient(135deg, #ec4899, #8b5cf6);
|
| 295 |
+
--text: #f8fafc;
|
| 296 |
+
--text-dim: #94a3b8;
|
| 297 |
+
--success: #10b981;
|
| 298 |
+
--error: #ef4444;
|
| 299 |
+
--warning: #f59e0b;
|
| 300 |
+
}
|
| 301 |
+
* { box-sizing: border-box; margin: 0; padding: 0; }
|
| 302 |
+
body {
|
| 303 |
+
font-family: 'Outfit', sans-serif;
|
| 304 |
+
background-color: var(--bg);
|
| 305 |
+
color: var(--text);
|
| 306 |
+
display: flex;
|
| 307 |
+
justify-content: center;
|
| 308 |
+
align-items: flex-start;
|
| 309 |
+
min-height: 100vh;
|
| 310 |
+
padding: 24px 0;
|
| 311 |
+
background-image:
|
| 312 |
+
radial-gradient(at 0% 0%, rgba(236, 72, 153, 0.15) 0px, transparent 50%),
|
| 313 |
+
radial-gradient(at 100% 0%, rgba(139, 92, 246, 0.15) 0px, transparent 50%);
|
| 314 |
+
}
|
| 315 |
+
.dashboard {
|
| 316 |
+
width: 90%; max-width: 600px;
|
| 317 |
+
background: var(--card-bg);
|
| 318 |
+
backdrop-filter: blur(12px);
|
| 319 |
+
border: 1px solid rgba(255,255,255,0.1);
|
| 320 |
+
border-radius: 24px; padding: 40px;
|
| 321 |
+
box-shadow: 0 25px 50px -12px rgba(0,0,0,0.5);
|
| 322 |
+
animation: fadeIn 0.8s ease-out;
|
| 323 |
+
margin: 24px 0;
|
| 324 |
+
}
|
| 325 |
+
@keyframes fadeIn { from { opacity:0; transform:translateY(20px); } to { opacity:1; transform:translateY(0); } }
|
| 326 |
+
header { text-align: center; margin-bottom: 40px; }
|
| 327 |
+
h1 {
|
| 328 |
+
font-size: 2.5rem; margin-bottom: 8px;
|
| 329 |
+
background: var(--accent);
|
| 330 |
+
-webkit-background-clip: text;
|
| 331 |
+
-webkit-text-fill-color: transparent;
|
| 332 |
+
font-weight: 600;
|
| 333 |
+
}
|
| 334 |
+
.subtitle { color: var(--text-dim); font-size: 0.9rem; letter-spacing: 1px; text-transform: uppercase; }
|
| 335 |
+
.stats-grid { display: grid; grid-template-columns: repeat(2, 1fr); gap: 20px; margin-bottom: 20px; }
|
| 336 |
+
.stat-card {
|
| 337 |
+
background: rgba(255,255,255,0.03);
|
| 338 |
+
border: 1px solid rgba(255,255,255,0.05);
|
| 339 |
+
padding: 20px; border-radius: 16px;
|
| 340 |
+
transition: transform 0.3s ease, border-color 0.3s ease;
|
| 341 |
+
}
|
| 342 |
+
.stat-card:hover { transform: translateY(-3px); border-color: rgba(236,72,153,0.3); }
|
| 343 |
+
.stat-label { color: var(--text-dim); font-size: 0.75rem; text-transform: uppercase; margin-bottom: 8px; display: block; }
|
| 344 |
+
.stat-value { font-size: 1.1rem; font-weight: 600; }
|
| 345 |
+
.stat-btn {
|
| 346 |
+
grid-column: span 2;
|
| 347 |
+
background: var(--accent);
|
| 348 |
+
color: #fff; padding: 16px;
|
| 349 |
+
border-radius: 16px; text-align: center;
|
| 350 |
+
text-decoration: none; font-weight: 600;
|
| 351 |
+
display: block;
|
| 352 |
+
transition: transform 0.3s ease, box-shadow 0.3s ease;
|
| 353 |
+
box-shadow: 0 10px 20px -5px rgba(236,72,153,0.4);
|
| 354 |
+
}
|
| 355 |
+
.stat-btn:hover { transform: scale(1.02); box-shadow: 0 15px 30px -5px rgba(236,72,153,0.6); }
|
| 356 |
+
.status-badge {
|
| 357 |
+
display: inline-flex; align-items: center; gap: 6px;
|
| 358 |
+
padding: 4px 12px; border-radius: 20px;
|
| 359 |
+
font-size: 0.8rem; font-weight: 600;
|
| 360 |
+
}
|
| 361 |
+
.status-online { background: rgba(16,185,129,0.1); color: var(--success); }
|
| 362 |
+
.status-offline { background: rgba(239,68,68,0.1); color: var(--error); }
|
| 363 |
+
.status-syncing { background: rgba(59,130,246,0.1); color: #3b82f6; }
|
| 364 |
+
.status-error { background: rgba(239,68,68,0.1); color: var(--error); }
|
| 365 |
+
.pulse {
|
| 366 |
+
width: 8px; height: 8px; border-radius: 50%;
|
| 367 |
+
background: currentColor;
|
| 368 |
+
box-shadow: 0 0 0 0 rgba(16,185,129,0.7);
|
| 369 |
+
animation: pulse 2s infinite;
|
| 370 |
+
}
|
| 371 |
+
@keyframes pulse {
|
| 372 |
+
0% { transform: scale(0.95); box-shadow: 0 0 0 0 rgba(16,185,129,0.7); }
|
| 373 |
+
70% { transform: scale(1); box-shadow: 0 0 0 10px rgba(16,185,129,0); }
|
| 374 |
+
100% { transform: scale(0.95); box-shadow: 0 0 0 0 rgba(16,185,129,0); }
|
| 375 |
+
}
|
| 376 |
+
.card-header { display: flex; align-items: center; justify-content: space-between; gap: 12px; margin-bottom: 8px; }
|
| 377 |
+
.card-header .stat-label { margin-bottom: 0; }
|
| 378 |
+
.sync-info { background: rgba(255,255,255,0.02); padding: 15px; border-radius: 12px; font-size: 0.85rem; color: var(--text-dim); margin-top: 10px; }
|
| 379 |
+
#sync-msg { color: var(--text); display: block; margin-top: 4px; }
|
| 380 |
+
.helper-card { width: 100%; margin-top: 20px; }
|
| 381 |
+
.helper-copy { color: var(--text-dim); font-size: 0.92rem; line-height: 1.6; margin-top: 10px; }
|
| 382 |
+
.helper-copy strong { color: var(--text); }
|
| 383 |
+
.helper-row { display: flex; gap: 10px; margin-top: 16px; flex-wrap: wrap; }
|
| 384 |
+
.helper-input {
|
| 385 |
+
flex: 1; min-width: 240px;
|
| 386 |
+
background: rgba(255,255,255,0.04);
|
| 387 |
+
border: 1px solid rgba(255,255,255,0.08);
|
| 388 |
+
color: var(--text); border-radius: 12px;
|
| 389 |
+
padding: 14px 16px; font: inherit;
|
| 390 |
+
}
|
| 391 |
+
.helper-input::placeholder { color: var(--text-dim); }
|
| 392 |
+
.helper-button {
|
| 393 |
+
background: var(--accent); color: #fff; border: 0;
|
| 394 |
+
border-radius: 12px; padding: 14px 18px;
|
| 395 |
+
font: inherit; font-weight: 600; cursor: pointer; min-width: 180px;
|
| 396 |
+
}
|
| 397 |
+
.helper-button:disabled { opacity: 0.6; cursor: wait; }
|
| 398 |
+
.hidden { display: none !important; }
|
| 399 |
+
.helper-note { margin-top: 10px; font-size: 0.82rem; color: var(--text-dim); }
|
| 400 |
+
.helper-result { margin-top: 14px; padding: 12px 14px; border-radius: 12px; font-size: 0.9rem; display: none; }
|
| 401 |
+
.helper-result.ok { display: block; background: rgba(16,185,129,0.1); color: var(--success); }
|
| 402 |
+
.helper-result.error { display: block; background: rgba(239,68,68,0.1); color: var(--error); }
|
| 403 |
+
.helper-shell { margin-top: 12px; }
|
| 404 |
+
.helper-shell.hidden { display: none; }
|
| 405 |
+
.helper-summary {
|
| 406 |
+
margin-top: 14px; padding: 12px 14px; border-radius: 12px;
|
| 407 |
+
background: rgba(255,255,255,0.03); color: var(--text-dim);
|
| 408 |
+
font-size: 0.9rem; line-height: 1.5;
|
| 409 |
+
}
|
| 410 |
+
.helper-summary strong { color: var(--text); }
|
| 411 |
+
.helper-summary.success { background: rgba(16,185,129,0.08); }
|
| 412 |
+
.helper-toggle {
|
| 413 |
+
margin-top: 14px; display: inline-flex; align-items: center; justify-content: center;
|
| 414 |
+
background: rgba(255,255,255,0.04); color: var(--text);
|
| 415 |
+
border: 1px solid rgba(255,255,255,0.08); border-radius: 12px;
|
| 416 |
+
padding: 12px 16px; font: inherit; font-weight: 600; cursor: pointer;
|
| 417 |
+
}
|
| 418 |
+
.footer { text-align: center; color: var(--text-dim); font-size: 0.8rem; margin-top: 20px; }
|
| 419 |
+
@media (max-width: 700px) {
|
| 420 |
+
body { padding: 16px 0; }
|
| 421 |
+
.dashboard { width: calc(100% - 24px); padding: 24px; border-radius: 18px; margin: 12px 0; }
|
| 422 |
+
header { margin-bottom: 28px; }
|
| 423 |
+
h1 { font-size: 2rem; }
|
| 424 |
+
.stats-grid { grid-template-columns: 1fr; gap: 14px; margin-bottom: 16px; }
|
| 425 |
+
.stat-btn { grid-column: span 1; }
|
| 426 |
+
.helper-row { flex-direction: column; }
|
| 427 |
+
.helper-input, .helper-button { width: 100%; min-width: 0; }
|
| 428 |
+
}
|
| 429 |
+
</style>
|
| 430 |
+
</head>
|
| 431 |
+
<body>
|
| 432 |
+
<div class="dashboard">
|
| 433 |
+
<header>
|
| 434 |
+
<h1>📮 HuggingPost</h1>
|
| 435 |
+
<p class="subtitle">Postiz on HF Spaces</p>
|
| 436 |
+
</header>
|
| 437 |
+
|
| 438 |
+
<div class="stats-grid">
|
| 439 |
+
<div class="stat-card">
|
| 440 |
+
<div class="card-header">
|
| 441 |
+
<span class="stat-label">Postiz</span>
|
| 442 |
+
<span id="postiz-badge">${postizBadge}</span>
|
| 443 |
+
</div>
|
| 444 |
+
<div style="margin-top: 8px; font-size: 0.82rem; color: var(--text-dim);">
|
| 445 |
+
Mounted at <strong style="color:var(--text)">/app</strong> · <a href="/app/" style="color:#f472b6;text-decoration:none;" target="_blank">Open UI →</a>
|
| 446 |
+
</div>
|
| 447 |
+
</div>
|
| 448 |
+
<div class="stat-card">
|
| 449 |
+
<span class="stat-label">Uptime</span>
|
| 450 |
+
<span class="stat-value" id="uptime">${formatUptime(Math.floor((Date.now() - startTime) / 1000))}</span>
|
| 451 |
+
</div>
|
| 452 |
+
<div class="stat-card">
|
| 453 |
+
<div class="card-header">
|
| 454 |
+
<span class="stat-label">Backup</span>
|
| 455 |
+
<span id="sync-badge">${syncBadge}</span>
|
| 456 |
+
</div>
|
| 457 |
+
<div style="margin-top: 8px; font-size: 0.82rem; color: var(--text-dim);">
|
| 458 |
+
Last sync: <span id="last-sync">${lastSync}</span>
|
| 459 |
+
</div>
|
| 460 |
+
</div>
|
| 461 |
+
<div class="stat-card">
|
| 462 |
+
<span class="stat-label">Database</span>
|
| 463 |
+
<span class="stat-value" id="db-status">${syncStatus.db_status === "connected" ? "PostgreSQL ✓" : syncStatus.db_status === "error" ? "Error" : "PostgreSQL"}</span>
|
| 464 |
+
</div>
|
| 465 |
+
<a href="/app/" id="open-ui-btn" class="stat-btn" target="_blank" rel="noopener noreferrer">Open Postiz →</a>
|
| 466 |
+
</div>
|
| 467 |
+
|
| 468 |
+
<div class="stat-card" style="width: 100%; margin-bottom: 20px;">
|
| 469 |
+
<div class="card-header">
|
| 470 |
+
<span class="stat-label">Backup Sync</span>
|
| 471 |
+
<div id="sync-badge-detail">${syncBadge}</div>
|
| 472 |
+
</div>
|
| 473 |
+
<div class="sync-info">
|
| 474 |
+
Last activity: <span id="sync-time-detail">${lastSync}</span>
|
| 475 |
+
<span id="sync-msg">${syncError ? "Error: " + syncError : syncStatus.last_sync_time ? "Sync successful" : hasBackup ? "Waiting for first sync..." : "HF_TOKEN not set — backups disabled"}</span>
|
| 476 |
+
</div>
|
| 477 |
+
</div>
|
| 478 |
+
|
| 479 |
+
<div class="stat-card helper-card">
|
| 480 |
+
<span class="stat-label">Keep Space Awake</span>
|
| 481 |
+
${keepAwakeHtml}
|
| 482 |
+
</div>
|
| 483 |
+
|
| 484 |
+
<div class="footer">Live updates every 30s · Schedule posts only fire while the Space is awake</div>
|
| 485 |
+
</div>
|
| 486 |
+
|
| 487 |
+
<script>
|
| 488 |
+
const KEEP_AWAKE_PRIVATE = ${initialData.spacePrivate ? "true" : "false"};
|
| 489 |
+
const KEEP_AWAKE_SETUP_ENABLED = ${UPTIMEROBOT_SETUP_ENABLED ? "true" : "false"};
|
| 490 |
+
const monitorStateKey = 'huggingpost_uptimerobot_v1';
|
| 491 |
+
|
| 492 |
+
function getCurrentSearch() { return window.location.search || ''; }
|
| 493 |
+
|
| 494 |
+
function renderSyncBadge(status, lastSyncTime, lastError) {
|
| 495 |
+
if (!${hasBackup}) return '<div class="status-badge status-offline">Disabled</div>';
|
| 496 |
+
if (lastError) return '<div class="status-badge status-error">Error</div>';
|
| 497 |
+
if (lastSyncTime) return '<div class="status-badge status-online"><div class="pulse"></div>Enabled</div>';
|
| 498 |
+
return '<div class="status-badge status-syncing"><div class="pulse" style="background:#3b82f6"></div>Pending</div>';
|
| 499 |
+
}
|
| 500 |
+
|
| 501 |
+
async function updateStatus() {
|
| 502 |
+
try {
|
| 503 |
+
const res = await fetch('/status' + getCurrentSearch());
|
| 504 |
+
const data = await res.json();
|
| 505 |
+
document.getElementById('uptime').textContent = data.uptime;
|
| 506 |
+
|
| 507 |
+
const pbadge = data.postizRunning
|
| 508 |
+
? '<div class="status-badge status-online"><div class="pulse"></div>Running</div>'
|
| 509 |
+
: '<div class="status-badge status-offline">Booting</div>';
|
| 510 |
+
document.getElementById('postiz-badge').innerHTML = pbadge;
|
| 511 |
+
|
| 512 |
+
const badge = renderSyncBadge(data.sync.db_status, data.sync.last_sync_time, data.sync.last_error);
|
| 513 |
+
document.getElementById('sync-badge').innerHTML = badge;
|
| 514 |
+
document.getElementById('sync-badge-detail').innerHTML = badge;
|
| 515 |
+
|
| 516 |
+
const lastSync = data.sync.last_sync_time ? new Date(data.sync.last_sync_time).toLocaleString() : 'Never';
|
| 517 |
+
document.getElementById('last-sync').textContent = lastSync;
|
| 518 |
+
document.getElementById('sync-time-detail').textContent = lastSync;
|
| 519 |
+
|
| 520 |
+
const syncMsg = data.sync.last_error ? 'Error: ' + data.sync.last_error
|
| 521 |
+
: data.sync.last_sync_time ? 'Sync successful'
|
| 522 |
+
: ${hasBackup} ? 'Waiting for first sync...' : 'HF_TOKEN not set — backups disabled';
|
| 523 |
+
document.getElementById('sync-msg').textContent = syncMsg;
|
| 524 |
+
|
| 525 |
+
const dbEl = document.getElementById('db-status');
|
| 526 |
+
dbEl.textContent = data.sync.db_status === 'connected' ? 'PostgreSQL ✓'
|
| 527 |
+
: data.sync.db_status === 'error' ? 'Error' : 'PostgreSQL';
|
| 528 |
+
} catch (e) { console.error('Status update failed:', e); }
|
| 529 |
+
}
|
| 530 |
+
|
| 531 |
+
function setMonitorUiState(isConfigured) {
|
| 532 |
+
const summary = document.getElementById('uptimerobot-summary');
|
| 533 |
+
const shell = document.getElementById('uptimerobot-shell');
|
| 534 |
+
const toggle = document.getElementById('uptimerobot-toggle');
|
| 535 |
+
if (!summary || !shell || !toggle) return;
|
| 536 |
+
if (isConfigured) {
|
| 537 |
+
summary.classList.add('success');
|
| 538 |
+
summary.innerHTML = '<strong>Already set up.</strong> Your UptimeRobot monitor should keep this public Space awake.';
|
| 539 |
+
shell.classList.add('hidden');
|
| 540 |
+
toggle.textContent = 'Set Up Again';
|
| 541 |
+
} else {
|
| 542 |
+
summary.classList.remove('success');
|
| 543 |
+
summary.innerHTML = 'One-time setup for public Spaces. Paste your UptimeRobot <strong>Main API key</strong> to create the monitor.';
|
| 544 |
+
toggle.textContent = 'Set Up Monitor';
|
| 545 |
+
}
|
| 546 |
+
}
|
| 547 |
+
|
| 548 |
+
function restoreMonitorUiState() {
|
| 549 |
+
try { setMonitorUiState(window.localStorage.getItem(monitorStateKey) === 'done'); }
|
| 550 |
+
catch { setMonitorUiState(false); }
|
| 551 |
+
}
|
| 552 |
+
|
| 553 |
+
async function setupUptimeRobot() {
|
| 554 |
+
const input = document.getElementById('uptimerobot-key');
|
| 555 |
+
const button = document.getElementById('uptimerobot-btn');
|
| 556 |
+
const result = document.getElementById('uptimerobot-result');
|
| 557 |
+
const apiKey = input.value.trim();
|
| 558 |
+
if (!apiKey) {
|
| 559 |
+
result.className = 'helper-result error';
|
| 560 |
+
result.textContent = 'Paste your UptimeRobot Main API key first.';
|
| 561 |
+
return;
|
| 562 |
+
}
|
| 563 |
+
button.disabled = true;
|
| 564 |
+
button.textContent = 'Creating...';
|
| 565 |
+
result.className = 'helper-result';
|
| 566 |
+
result.textContent = '';
|
| 567 |
+
try {
|
| 568 |
+
const res = await fetch('/uptimerobot/setup' + getCurrentSearch(), {
|
| 569 |
+
method: 'POST',
|
| 570 |
+
headers: { 'Content-Type': 'application/json' },
|
| 571 |
+
body: JSON.stringify({ apiKey }),
|
| 572 |
+
});
|
| 573 |
+
const data = await res.json();
|
| 574 |
+
if (!res.ok) throw new Error(data.message || 'Failed to create monitor.');
|
| 575 |
+
result.className = 'helper-result ok';
|
| 576 |
+
result.textContent = data.message || 'UptimeRobot monitor is ready.';
|
| 577 |
+
input.value = '';
|
| 578 |
+
try { window.localStorage.setItem(monitorStateKey, 'done'); } catch {}
|
| 579 |
+
setMonitorUiState(true);
|
| 580 |
+
document.getElementById('uptimerobot-shell').classList.add('hidden');
|
| 581 |
+
} catch (error) {
|
| 582 |
+
result.className = 'helper-result error';
|
| 583 |
+
result.textContent = error.message || 'Failed to create monitor.';
|
| 584 |
+
} finally {
|
| 585 |
+
button.disabled = false;
|
| 586 |
+
button.textContent = 'Create Monitor';
|
| 587 |
+
}
|
| 588 |
+
}
|
| 589 |
+
|
| 590 |
+
updateStatus();
|
| 591 |
+
setInterval(updateStatus, 30000);
|
| 592 |
+
|
| 593 |
+
if (KEEP_AWAKE_SETUP_ENABLED && !KEEP_AWAKE_PRIVATE) {
|
| 594 |
+
restoreMonitorUiState();
|
| 595 |
+
const toggleBtn = document.getElementById('uptimerobot-toggle');
|
| 596 |
+
const createBtn = document.getElementById('uptimerobot-btn');
|
| 597 |
+
if (toggleBtn) toggleBtn.addEventListener('click', () => {
|
| 598 |
+
document.getElementById('uptimerobot-shell').classList.toggle('hidden');
|
| 599 |
+
});
|
| 600 |
+
if (createBtn) createBtn.addEventListener('click', setupUptimeRobot);
|
| 601 |
+
}
|
| 602 |
+
</script>
|
| 603 |
+
</body>
|
| 604 |
+
</html>`;
|
| 605 |
+
}
|
| 606 |
+
|
| 607 |
+
// ============================================================================
|
| 608 |
+
// Request body reader
|
| 609 |
+
// ============================================================================
|
| 610 |
+
|
| 611 |
+
// Collect an incoming request body into a string, capped at 64 KiB.
// Rejects the promise (and aborts the socket) if the client sends more.
function readRequestBody(req) {
  const MAX_BODY_CHARS = 64 * 1024;
  return new Promise((resolve, reject) => {
    let collected = "";
    req.on("data", (chunk) => {
      collected += chunk;
      if (collected.length > MAX_BODY_CHARS) {
        reject(new Error("Request too large"));
        req.destroy();
      }
    });
    req.on("end", () => resolve(collected));
    req.on("error", reject);
  });
}
|
| 622 |
+
|
| 623 |
+
// ============================================================================
|
| 624 |
+
// Reverse proxy
|
| 625 |
+
// ============================================================================
|
| 626 |
+
|
| 627 |
+
// Clone the incoming headers for the upstream hop, rewriting Host and the
// x-forwarded-* trio so Postiz sees the original client IP, host and scheme.
function buildProxyHeaders(headers) {
  const forwarded = headers["x-forwarded-for"];
  let clientIp = "";
  if (typeof forwarded === "string") {
    clientIp = forwarded.split(",")[0].trim();
  } else if (Array.isArray(forwarded) && forwarded.length) {
    clientIp = String(forwarded[0]).split(",")[0].trim();
  }
  const upstream = { ...headers };
  upstream.host = `${POSTIZ_HOST}:${POSTIZ_PORT}`;
  upstream["x-forwarded-for"] = clientIp;
  upstream["x-forwarded-host"] = headers.host || "";
  upstream["x-forwarded-proto"] = headers["x-forwarded-proto"] || "https";
  return upstream;
}
|
| 639 |
+
|
| 640 |
+
// Proxy one HTTP request to the Postiz upstream (nginx on POSTIZ_PORT).
// `overridePath` lets callers rewrite the path (e.g. strip the /app prefix).
function proxyHttp(req, res, overridePath) {
  const path = overridePath !== undefined ? overridePath : req.url;
  let responseBegan = false;
  const upstream = http.request(
    {
      hostname: POSTIZ_HOST,
      port: POSTIZ_PORT,
      method: req.method,
      path,
      headers: buildProxyHeaders(req.headers),
    },
    (upstreamRes) => {
      responseBegan = true;
      // Mirror upstream status + headers, then stream the body through.
      res.writeHead(upstreamRes.statusCode || 502, upstreamRes.headers);
      upstreamRes.pipe(res);
    },
  );
  upstream.on("error", (error) => {
    // Once the response has started we can only drop the connection;
    // otherwise answer with a friendly 502 JSON payload.
    if (res.headersSent || responseBegan) {
      res.destroy();
      return;
    }
    res.writeHead(502, { "Content-Type": "application/json" });
    res.end(JSON.stringify({
      status: "error",
      message: "Postiz unavailable",
      detail: error.message,
      hint: "Postiz may still be starting (first boot ~60s after build). Check the Logs tab.",
    }));
  });
  // Tear down the upstream request if the client disconnects mid-flight.
  res.on("close", () => upstream.destroy());
  req.pipe(upstream);
}
|
| 665 |
+
|
| 666 |
+
// Tunnel a WebSocket (HTTP Upgrade) request to the Postiz upstream by
// hand-writing the raw request line + headers onto a fresh TCP socket,
// then splicing the client and upstream sockets together.
function proxyUpgrade(req, socket, head, overridePath) {
  const targetPath = overridePath !== undefined ? overridePath : req.url;
  const proxySocket = net.connect(POSTIZ_PORT, POSTIZ_HOST);
  proxySocket.on("connect", () => {
    const f = req.headers["x-forwarded-for"];
    const clientIp = typeof f === "string" ? f.split(",")[0].trim() : req.socket.remoteAddress || "";
    // Copy original headers verbatim (rawHeaders preserves duplicates and
    // original casing), dropping host and any existing x-forwarded-* —
    // those are re-emitted below with upstream-appropriate values.
    const headerLines = [];
    for (let i = 0; i < req.rawHeaders.length; i += 2) {
      const name = req.rawHeaders[i];
      const value = req.rawHeaders[i + 1];
      const lower = String(name).toLowerCase();
      if (lower === "host" || lower.startsWith("x-forwarded-")) continue;
      headerLines.push(`${name}: ${value}`);
    }
    const lines = [
      `${req.method} ${targetPath} HTTP/${req.httpVersion}`,
      ...headerLines,
      `Host: ${POSTIZ_HOST}:${POSTIZ_PORT}`,
      `X-Forwarded-For: ${clientIp}`,
      `X-Forwarded-Host: ${req.headers.host || ""}`,
      `X-Forwarded-Proto: ${req.headers["x-forwarded-proto"] || "https"}`,
      // Two empty strings → trailing CRLF CRLF that terminates the header block.
      "", "",
    ];
    proxySocket.write(lines.join("\r\n"));
    // Forward any body bytes that arrived together with the upgrade request.
    if (head && head.length > 0) proxySocket.write(head);
    // Bidirectional splice: client <-> upstream.
    socket.pipe(proxySocket).pipe(socket);
  });
  proxySocket.on("error", () => {
    if (socket.writable) socket.write("HTTP/1.1 502 Bad Gateway\r\nConnection: close\r\n\r\n");
    socket.destroy();
  });
  socket.on("error", () => proxySocket.destroy());
}
|
| 699 |
+
|
| 700 |
+
// ============================================================================
|
| 701 |
+
// HTTP Server
|
| 702 |
+
// ============================================================================
|
| 703 |
+
|
| 704 |
+
// Main HTTP entry point. Route table:
//   /health             → liveness JSON (uptime + sync status), synchronous
//   /status             → dashboard polling endpoint (adds a Postiz health probe)
//   /uptimerobot/setup  → POST-only helper that creates a keep-awake monitor
//   /                   → HTML dashboard
//   /app, /app/*        → reverse proxy to Postiz with the prefix stripped
//   /_next/*, /static/* → 301 into /app (assets that bypassed the basePath)
//   anything else       → 404
const server = http.createServer((req, res) => {
  const parsedUrl = parseRequestUrl(req.url || "/");
  const pathname = parsedUrl.pathname;
  // Seconds since process start — reused by /health and /status below.
  const uptime = Math.floor((Date.now() - startTime) / 1000);

  // ── /health ──────────────────────────────────────────────────────────────
  if (pathname === "/health") {
    res.writeHead(200, { "Content-Type": "application/json" });
    res.end(JSON.stringify({
      status: "ok", uptime, uptimeHuman: formatUptime(uptime),
      timestamp: new Date().toISOString(), sync: readSyncStatus(),
    }));
    return;
  }

  // ── /status ──────────────────────────────────────────────────────────────
  // `void (async () => …)()` lets us await inside a synchronous handler;
  // the IIFE contains any rejection instead of crashing the server.
  if (pathname === "/status") {
    void (async () => {
      const postiz = await checkPostizHealth();
      res.writeHead(200, { "Content-Type": "application/json" });
      res.end(JSON.stringify({
        uptime: formatUptime(uptime),
        postizRunning: postiz.status === "running",
        sync: readSyncStatus(),
      }));
    })();
    return;
  }

  // ── /uptimerobot/setup ───────────────────────────────────────────────────
  if (pathname === "/uptimerobot/setup") {
    if (req.method !== "POST") {
      res.writeHead(405, { "Content-Type": "application/json" });
      res.end(JSON.stringify({ message: "Method not allowed" }));
      return;
    }
    void (async () => {
      try {
        // Guard order: feature flag, then rate limit, then origin check —
        // cheapest rejection first, before the request body is read.
        if (!UPTIMEROBOT_SETUP_ENABLED) {
          res.writeHead(403, { "Content-Type": "application/json" });
          res.end(JSON.stringify({ message: "Uptime setup is disabled." }));
          return;
        }
        if (isRateLimited(req)) {
          res.writeHead(429, { "Content-Type": "application/json" });
          res.end(JSON.stringify({ message: "Too many requests." }));
          return;
        }
        if (!isAllowedUptimeSetupOrigin(req)) {
          res.writeHead(403, { "Content-Type": "application/json" });
          res.end(JSON.stringify({ message: "Invalid request origin." }));
          return;
        }
        const body = await readRequestBody(req);
        const parsed = JSON.parse(body || "{}");
        const apiKey = String(parsed.apiKey || "").trim();
        if (!isValidUptimeApiKey(apiKey)) {
          res.writeHead(400, { "Content-Type": "application/json" });
          res.end(JSON.stringify({ message: "A valid API key is required." }));
          return;
        }
        const result = await createUptimeRobotMonitor(apiKey, req.headers.host);
        res.writeHead(200, { "Content-Type": "application/json" });
        res.end(JSON.stringify(result));
      } catch (error) {
        // Covers oversized/unreadable bodies, invalid JSON, and API errors.
        res.writeHead(400, { "Content-Type": "application/json" });
        res.end(JSON.stringify({ message: error?.message || "Failed to create UptimeRobot monitor." }));
      }
    })();
    return;
  }

  // ── Dashboard at exact / ─────────────────────────────────────────────────
  if (pathname === "/" || pathname === "") {
    void (async () => {
      // Fetch Postiz health and Space visibility concurrently.
      const [postiz, spacePrivate] = await Promise.all([
        checkPostizHealth(),
        resolveSpaceIsPrivate(parsedUrl),
      ]);
      const initialData = {
        postizRunning: postiz.status === "running",
        sync: readSyncStatus(),
        spacePrivate,
      };
      res.writeHead(200, { "Content-Type": "text/html; charset=utf-8" });
      res.end(renderDashboard(initialData));
    })();
    return;
  }

  // ── /app or /app/* → strip prefix, proxy to Postiz nginx :5000 ───────────
  if (pathname === "/app" || pathname.startsWith("/app/")) {
    // "/app" alone maps to upstream "/".
    const stripped = pathname.slice("/app".length) || "/";
    const query = parsedUrl.search || "";
    proxyHttp(req, res, stripped + query);
    return;
  }

  // ── Stray asset URLs without basePath (Sentry, hardcoded /static) ────────
  // Browser-side libs sometimes emit absolute URLs that bypass Next.js
  // basePath. Catch /_next/* and /static/* at root and 301 to /app/* so the
  // browser learns the right prefix.
  if (pathname.startsWith("/_next/") || pathname.startsWith("/static/")) {
    res.writeHead(301, { Location: "/app" + pathname + (parsedUrl.search || "") });
    res.end();
    return;
  }

  // ── Anything else → 404 ──────────────────────────────────────────────────
  res.writeHead(404, { "Content-Type": "text/plain" });
  res.end("Not found. Try / for the dashboard or /app for Postiz.");
});
|
| 816 |
+
|
| 817 |
+
// WebSocket upgrades: only /app/* is tunneled to Postiz; local dashboard
// routes and everything else are refused outright.
server.on("upgrade", (req, socket, head) => {
  const url = parseRequestUrl(req.url || "/");
  const path = url.pathname;
  if (isLocalRoute(path)) {
    socket.destroy();
    return;
  }
  const isAppRoute = path === "/app" || path.startsWith("/app/");
  if (isAppRoute) {
    const upstreamPath = (path.slice("/app".length) || "/") + (url.search || "");
    proxyUpgrade(req, socket, head, upstreamPath);
    return;
  }
  socket.destroy();
});
|
| 828 |
+
|
| 829 |
+
// Bind on all interfaces — HF Spaces routes external traffic to $PORT.
server.listen(PORT, "0.0.0.0", () => {
  console.log(`✓ Health server listening on port ${PORT}`);
  console.log(`✓ Dashboard : http://localhost:${PORT}/`);
  console.log(`✓ Postiz : http://localhost:${PORT}/app/ → nginx :${POSTIZ_PORT}`);
});
|
postiz-sync.py
ADDED
|
@@ -0,0 +1,382 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
"""
|
| 3 |
+
HuggingPost backup/restore — Postgres dump + uploads dir + secrets → HF Dataset.
|
| 4 |
+
|
| 5 |
+
Usage:
|
| 6 |
+
python3 postiz-sync.py sync # backup → HF Dataset
|
| 7 |
+
python3 postiz-sync.py restore # HF Dataset → restore DB + uploads + secrets
|
| 8 |
+
|
| 9 |
+
Adapted from HuggingClip/paperclip-sync.py with three differences:
|
| 10 |
+
1. DB user is `postiz` (not `postgres`) — pg_dump is run as the postiz role.
|
| 11 |
+
2. Tarball includes /postiz/uploads (Postiz media) AND /postiz/.secrets
|
| 12 |
+
(jwt secret + db password) so a fresh container can recover identity.
|
| 13 |
+
3. Restore drops + recreates the postiz database before psql replay so we
|
| 14 |
+
don't get "database already exists" / duplicate-key errors.
|
| 15 |
+
"""
|
| 16 |
+
|
| 17 |
+
import os
|
| 18 |
+
import sys
|
| 19 |
+
import json
|
| 20 |
+
import shutil
|
| 21 |
+
import tarfile
|
| 22 |
+
import tempfile
|
| 23 |
+
import subprocess
|
| 24 |
+
import logging
|
| 25 |
+
import warnings
|
| 26 |
+
from datetime import datetime, timezone
|
| 27 |
+
from pathlib import Path
|
| 28 |
+
|
| 29 |
+
warnings.filterwarnings("ignore", category=UserWarning, module="huggingface_hub")
|
| 30 |
+
|
| 31 |
+
from huggingface_hub import HfApi
|
| 32 |
+
from huggingface_hub.utils import RepositoryNotFoundError, EntryNotFoundError
|
| 33 |
+
import huggingface_hub
|
| 34 |
+
|
| 35 |
+
huggingface_hub.utils.disable_progress_bars()
|
| 36 |
+
|
| 37 |
+
# ── Logging ──────────────────────────────────────────────────────────────────
# Root logger stays at WARNING; this module's logger speaks at INFO so the
# sync's own messages are visible without third-party library noise.
logging.basicConfig(level=logging.WARNING, format="%(asctime)s - %(levelname)s - %(message)s")
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
logging.getLogger("httpx").setLevel(logging.WARNING)
logging.getLogger("huggingface_hub").setLevel(logging.WARNING)

# ── Config ───────────────────────────────────────────────────────────────────
# All knobs come from the environment (HF Space "Variables and secrets").
HF_TOKEN = os.environ.get("HF_TOKEN")
HF_USERNAME = os.environ.get("HF_USERNAME")
DATABASE_URL = os.environ.get("DATABASE_URL", "postgresql://postiz:postiz@localhost:5432/postiz")
BACKUP_DATASET_NAME = os.environ.get("BACKUP_DATASET_NAME", "huggingpost-backup")
SYNC_MAX_FILE_BYTES = int(os.environ.get("SYNC_MAX_FILE_BYTES", str(100 * 1024 * 1024)))  # 100 MB
POSTIZ_HOME = Path(os.environ.get("POSTIZ_HOME", "/postiz"))
UPLOADS_DIR = Path(os.environ.get("UPLOAD_DIRECTORY", str(POSTIZ_HOME / "uploads")))
SECRETS_DIR = POSTIZ_HOME / ".secrets"
# Written by write_status(); the health server dashboard reads this path.
STATUS_FILE = Path("/tmp/sync-status.json")
| 54 |
+
|
| 55 |
+
|
| 56 |
+
# ── Helpers ──────────────────────────────────────────────────────────────────
|
| 57 |
+
def parse_db_url(db_url: str) -> dict:
|
| 58 |
+
try:
|
| 59 |
+
s = db_url.replace("postgres://", "").replace("postgresql://", "")
|
| 60 |
+
if "@" in s:
|
| 61 |
+
creds, host_db = s.split("@", 1)
|
| 62 |
+
if ":" in creds:
|
| 63 |
+
user, password = creds.split(":", 1)
|
| 64 |
+
else:
|
| 65 |
+
user, password = creds, ""
|
| 66 |
+
else:
|
| 67 |
+
user, password, host_db = "postgres", "", s
|
| 68 |
+
if "/" in host_db:
|
| 69 |
+
host_port, database = host_db.rsplit("/", 1)
|
| 70 |
+
else:
|
| 71 |
+
host_port, database = host_db, "postiz"
|
| 72 |
+
if ":" in host_port:
|
| 73 |
+
host, port = host_port.rsplit(":", 1)
|
| 74 |
+
else:
|
| 75 |
+
host, port = host_port, "5432"
|
| 76 |
+
return {"user": user, "password": password, "host": host, "port": port, "database": database}
|
| 77 |
+
except Exception as e:
|
| 78 |
+
logger.error(f"Failed to parse DATABASE_URL: {e}")
|
| 79 |
+
return None
|
| 80 |
+
|
| 81 |
+
|
| 82 |
+
def write_status(status: dict):
    """Persist the sync status dict to STATUS_FILE as pretty-printed JSON.

    Best-effort: failures are logged, never raised, so a broken /tmp
    cannot take down a backup run.
    """
    try:
        serialized = json.dumps(status, indent=2)
        STATUS_FILE.write_text(serialized)
    except Exception as e:
        logger.error(f"Failed to write status file: {e}")
|
| 87 |
+
|
| 88 |
+
|
| 89 |
+
def read_status() -> dict:
    """Load the last persisted sync status, or a neutral default.

    Any problem (missing file, unreadable file, corrupt JSON) falls
    through to the default dict rather than raising.
    """
    default = {"db_status": "unknown", "last_sync_time": None, "last_error": None, "sync_count": 0}
    if not STATUS_FILE.exists():
        return default
    try:
        return json.loads(STATUS_FILE.read_text())
    except Exception:
        return default
|
| 96 |
+
|
| 97 |
+
|
| 98 |
+
def env_with_password(db: dict) -> dict:
    """Copy the process environment, adding PGPASSWORD when the parsed DB
    config carries a non-empty password (pg_dump/psql read it from there)."""
    env = dict(os.environ)
    password = db["password"]
    if password:
        env["PGPASSWORD"] = password
    return env
|
| 103 |
+
|
| 104 |
+
|
| 105 |
+
# ── Backup ───────────────────────────────────────────────────────────────────
|
| 106 |
+
def backup_database() -> tuple[str | None, bool]:
    """Dump the Postiz database to a plain-SQL file with pg_dump.

    Returns (path_to_dump, True) on success, (None, False) otherwise.
    The dump is taken with --clean --if-exists so replaying it later
    is idempotent.
    """
    db = parse_db_url(DATABASE_URL)
    if db is None:
        return None, False

    dump_file = Path(tempfile.mkdtemp()) / "postiz.sql"
    cmd = [
        "pg_dump",
        f"--host={db['host']}",
        f"--port={db['port']}",
        f"--username={db['user']}",
        "--format=plain",
        "--no-owner",
        "--no-privileges",
        "--clean",  # emit DROP statements so restore is idempotent
        "--if-exists",
        db["database"],
    ]

    try:
        with open(dump_file, "w") as out:
            completed = subprocess.run(
                cmd, stdout=out, stderr=subprocess.PIPE,
                env=env_with_password(db), timeout=600,
            )
        if completed.returncode != 0:
            logger.error(f"pg_dump failed: {completed.stderr.decode('utf-8', errors='ignore')}")
            return None, False
        size_mb = dump_file.stat().st_size / 1024 / 1024
        logger.debug(f"Database dumped ({size_mb:.2f} MB)")
        return str(dump_file), True
    except subprocess.TimeoutExpired:
        logger.error("pg_dump timed out (>600s)")
        return None, False
    except Exception as e:
        logger.error(f"Database backup error: {e}")
        return None, False
|
| 142 |
+
|
| 143 |
+
|
| 144 |
+
def create_backup_tarball(dump_file: str) -> tuple[str | None, bool]:
    """Bundle the SQL dump plus uploads/ and .secrets/ into one .tar.gz.

    Returns (tarball_path, True) on success; (None, False) when the
    archive exceeds SYNC_MAX_FILE_BYTES or tar creation fails.
    """
    tarball = Path(tempfile.mkdtemp()) / "huggingpost-backup.tar.gz"
    try:
        with tarfile.open(tarball, "w:gz") as archive:
            archive.add(dump_file, arcname="postiz.sql")
            if UPLOADS_DIR.exists():
                archive.add(str(UPLOADS_DIR), arcname="uploads")
            if SECRETS_DIR.exists():
                archive.add(str(SECRETS_DIR), arcname=".secrets")
        size = tarball.stat().st_size
        size_mb = size / 1024 / 1024
        logger.debug(f"Tarball created ({size_mb:.2f} MB)")
        if size > SYNC_MAX_FILE_BYTES:
            logger.error(
                f"Backup too large: {size_mb:.0f} MB > {SYNC_MAX_FILE_BYTES/1024/1024:.0f} MB. "
                "Move uploads to Cloudflare R2 (set STORAGE_PROVIDER=cloudflare) "
                "or raise SYNC_MAX_FILE_BYTES."
            )
            return None, False
        return str(tarball), True
    except Exception as e:
        logger.error(f"Failed to create tarball: {e}")
        return None, False
|
| 168 |
+
|
| 169 |
+
|
| 170 |
+
def upload_to_hf(backup_file: str) -> bool:
    """Push the backup tarball to a private HF dataset at snapshots/latest.tar.gz.

    Creates the dataset repo on first run (exist_ok makes that idempotent).
    Returns True on success; every failure mode (missing token, unresolvable
    username, API errors) returns False instead of raising.
    """
    if not HF_TOKEN:
        logger.warning("HF_TOKEN not set — skipping upload")
        return False
    try:
        api = HfApi(token=HF_TOKEN)
        # HF_USERNAME overrides; otherwise resolve the account owning HF_TOKEN.
        username = HF_USERNAME or api.whoami().get("name")
        if not username:
            logger.error("Failed to resolve HF username")
            return False
        dataset_id = f"{username}/{BACKUP_DATASET_NAME}"
        api.create_repo(repo_id=dataset_id, repo_type="dataset", private=True, exist_ok=True)
        # Fixed path_in_repo: each backup overwrites the previous snapshot,
        # so the dataset always holds exactly one "latest" tarball.
        api.upload_file(
            path_or_fileobj=backup_file,
            path_in_repo="snapshots/latest.tar.gz",
            repo_id=dataset_id,
            repo_type="dataset",
            commit_message=f"Backup at {datetime.now(timezone.utc).isoformat()}",
        )
        logger.debug(f"Uploaded to {dataset_id}")
        return True
    except Exception as e:
        logger.error(f"HF upload failed: {e}")
        return False
|
| 194 |
+
|
| 195 |
+
|
| 196 |
+
# ── Restore ──────────────────────────────────────────────────────────────────
|
| 197 |
+
def restore_database(sql_file: str) -> bool:
    """Replay a plain-SQL dump into the postiz database.

    Drops and recreates the database first (as the OS ``postgres``
    superuser), then pipes the dump through psql in a single transaction.
    Returns True on success, False on any failure.
    """
    db = parse_db_url(DATABASE_URL)
    if not db:
        return False

    # Drop+recreate the postiz database as the OS postgres superuser. This
    # bypasses connection-busy errors and gives us a clean slate to replay
    # the dump into. The dump itself was taken with --clean --if-exists so
    # it's also idempotent if we ever skip the recreate.
    # NOTE(review): db['database'] / db['user'] are interpolated into both a
    # shell command line and SQL — acceptable only because DATABASE_URL is
    # operator-controlled. Never feed untrusted input through this path.
    try:
        recreate = (
            f"DROP DATABASE IF EXISTS {db['database']} WITH (FORCE); "
            f"CREATE DATABASE {db['database']} OWNER {db['user']};"
        )
        subprocess.run(
            ["su", "-", "postgres", "-c", f"psql -c \"{recreate}\""],
            check=False, capture_output=True, timeout=60,
        )
    except Exception as e:
        # Best-effort: the replay below may still succeed against the
        # existing database thanks to the --clean dump.
        logger.warning(f"DB recreate via su postgres failed (continuing): {e}")

    cmd = [
        "psql",
        f"--host={db['host']}",
        f"--port={db['port']}",
        f"--username={db['user']}",
        "--no-password",
        "--single-transaction",  # all-or-nothing replay
        db["database"],
    ]

    try:
        with open(sql_file, "r") as f:
            result = subprocess.run(
                cmd, stdin=f, stdout=subprocess.DEVNULL, stderr=subprocess.PIPE,
                env=env_with_password(db), timeout=600,
            )
            if result.returncode != 0:
                # Truncate stderr so one giant error dump doesn't flood the log.
                logger.error(f"psql restore failed: {result.stderr.decode('utf-8', errors='ignore')[:2000]}")
                return False
        return True
    except subprocess.TimeoutExpired:
        logger.error("psql restore timed out (>600s)")
        return False
    except Exception as e:
        logger.error(f"Database restore error: {e}")
        return False
|
| 244 |
+
|
| 245 |
+
|
| 246 |
+
def _copy_secrets(secrets_src: Path) -> None:
    """Copy restored secret files into SECRETS_DIR with 0600 perms (best-effort per file)."""
    if not secrets_src.exists():
        return
    SECRETS_DIR.mkdir(parents=True, exist_ok=True)
    for item in secrets_src.iterdir():
        target = SECRETS_DIR / item.name
        try:
            if target.exists():
                target.unlink()
            shutil.copy2(item, target)
            target.chmod(0o600)
        except Exception as e:
            logger.warning(f"Failed to restore secret {item.name}: {e}")


def _copy_uploads(uploads_src: Path) -> None:
    """Copy restored upload files/directories into UPLOADS_DIR (best-effort per entry)."""
    if not uploads_src.exists():
        return
    UPLOADS_DIR.mkdir(parents=True, exist_ok=True)
    for item in uploads_src.iterdir():
        target = UPLOADS_DIR / item.name
        try:
            if target.exists():
                if target.is_dir():
                    shutil.rmtree(target)
                else:
                    target.unlink()
            if item.is_dir():
                shutil.copytree(item, target)
            else:
                shutil.copy2(item, target)
        except Exception as e:
            logger.warning(f"Failed to restore upload {item.name}: {e}")


def download_and_restore() -> bool | None:
    """Fetch the latest snapshot from the HF backup dataset and restore it.

    Returns:
        True  — snapshot downloaded and database restored.
        False — restore attempted but failed (or HF_TOKEN missing).
        None  — no snapshot exists yet (fresh instance, nothing to do).
    """
    if not HF_TOKEN:
        logger.warning("HF_TOKEN not set — skipping restore")
        return False
    try:
        api = HfApi(token=HF_TOKEN)
        username = HF_USERNAME or api.whoami().get("name")
        if not username:
            return False
        dataset_id = f"{username}/{BACKUP_DATASET_NAME}"
        temp_dir = tempfile.mkdtemp()
        try:
            try:
                snapshot = api.hf_hub_download(
                    repo_id=dataset_id, repo_type="dataset",
                    filename="snapshots/latest.tar.gz", local_dir=temp_dir,
                    local_dir_use_symlinks=False,
                )
            except (RepositoryNotFoundError, EntryNotFoundError):
                logger.info(f"No backup yet in {dataset_id} — fresh instance")
                return None

            # filter="data" rejects path-traversal / device members (PEP 706).
            with tarfile.open(snapshot, "r:gz") as tar:
                tar.extractall(temp_dir, filter="data")

            sql = Path(temp_dir) / "postiz.sql"
            if not sql.exists():
                logger.error("postiz.sql not found in backup tarball")
                return False

            # Restore secrets FIRST so the DB password matches what's about
            # to be used during the restore (otherwise psql auth fails).
            _copy_secrets(Path(temp_dir) / ".secrets")

            # Then uploads, then replay the SQL dump.
            _copy_uploads(Path(temp_dir) / "uploads")

            return restore_database(str(sql))
        finally:
            # BUGFIX: the scratch directory used to be leaked on every
            # restore; remove it regardless of outcome.
            shutil.rmtree(temp_dir, ignore_errors=True)
    except Exception as e:
        logger.error(f"Restore from HF failed: {e}")
        return False
# ── Public CLI ───────────────────────────────────────────────────────────────
def cmd_sync() -> bool:
    """Dump the DB, package it into a tarball, and push it to the HF Dataset.

    Updates the on-disk status file with the outcome on every code path.
    Returns True only when dump, packaging, and upload all succeed.
    """
    logger.info("Syncing backup to HF Dataset...")
    status = read_status()
    try:
        dump, dumped = backup_database()
        if not dumped:
            status.update({"last_error": "pg_dump failed", "db_status": "error"})
            write_status(status)
            return False

        tarball, packed = create_backup_tarball(dump)
        if not packed:
            status.update({"last_error": "tarball creation failed", "db_status": "error"})
            write_status(status)
            return False

        uploaded = upload_to_hf(tarball)
        timestamp = datetime.now(timezone.utc).isoformat().replace("+00:00", "Z")
        status["last_sync_time"] = timestamp
        status["db_status"] = "connected" if uploaded else "error"
        status["last_error"] = None if uploaded else "Upload failed"
        status["sync_count"] = status.get("sync_count", 0) + 1
        write_status(status)
        logger.info("Backup synced OK" if uploaded else "Backup sync failed")
        return uploaded
    except Exception as e:
        logger.error(f"Backup operation failed: {e}")
        status.update({"last_error": str(e), "db_status": "error"})
        write_status(status)
        return False
def cmd_restore() -> bool:
    """Restore state from the HF Dataset; a missing backup counts as success.

    download_and_restore() returns None for "no snapshot yet" (fresh
    instance), True for a completed restore, False for a failed one.
    """
    logger.info("Restoring from HF Dataset...")
    status = read_status()

    def _record(db_status, last_error):
        # Persist the outcome so the health dashboard can show it.
        status.update({"db_status": db_status, "last_error": last_error})
        write_status(status)

    try:
        outcome = download_and_restore()
        if outcome is None:
            _record("connected", None)
            logger.info("No prior backup — fresh instance")
            return True
        if outcome:
            _record("connected", None)
            logger.info("Restore OK")
            return True
        _record("error", "Restore failed")
        return False
    except Exception as e:
        logger.error(f"Restore operation failed: {e}")
        _record("error", str(e))
        return False
def main():
    """CLI entry point: dispatch `sync` or `restore`; exit non-zero on failure."""
    if len(sys.argv) < 2:
        print("Usage: postiz-sync.py {sync|restore}")
        sys.exit(1)

    command = sys.argv[1]
    if command == "sync":
        sys.exit(0 if cmd_sync() else 1)
    elif command == "restore":
        sys.exit(0 if cmd_restore() else 1)
    else:
        print(f"Unknown command: {command}")
        sys.exit(1)


if __name__ == "__main__":
    main()
#!/bin/bash
set -euo pipefail

# Create or update a UptimeRobot monitor for this Hugging Face Space.
#
# Requirements:
#   - UPTIMEROBOT_API_KEY: Main API key from UptimeRobot
#   - SPACE_HOST or first CLI arg: your HF Space host, e.g. "user-space.hf.space"
#   - curl and jq available on PATH
#
# Optional:
#   - UPTIMEROBOT_MONITOR_NAME: friendly name for the monitor
#   - UPTIMEROBOT_ALERT_CONTACTS: dash-separated alert contact IDs, e.g. "123456-789012"
#   - UPTIMEROBOT_INTERVAL: monitoring interval in minutes (subject to account limits)

API_URL="https://api.uptimerobot.com/v2"
API_KEY="${UPTIMEROBOT_API_KEY:-}"
SPACE_HOST_INPUT="${1:-${SPACE_HOST:-}}"

# Fail fast with a clear message if a required tool is missing.
for tool in curl jq; do
  if ! command -v "$tool" >/dev/null 2>&1; then
    echo "Missing required tool: $tool" >&2
    exit 1
  fi
done

if [ -z "$API_KEY" ]; then
  echo "Missing UPTIMEROBOT_API_KEY."
  echo "Use the Main API key from UptimeRobot -> Integrations."
  echo "Do not use the Read-only API key or a Monitor-specific API key."
  exit 1
fi

if [ -z "$SPACE_HOST_INPUT" ]; then
  echo "Missing Space host."
  echo "Usage: UPTIMEROBOT_API_KEY=... ./setup-uptimerobot.sh your-space.hf.space"
  exit 1
fi

# Normalize: strip scheme and any trailing path component.
SPACE_HOST_CLEAN="${SPACE_HOST_INPUT#https://}"
SPACE_HOST_CLEAN="${SPACE_HOST_CLEAN#http://}"
SPACE_HOST_CLEAN="${SPACE_HOST_CLEAN%%/*}"

MONITOR_URL="https://${SPACE_HOST_CLEAN}/health"
MONITOR_NAME="${UPTIMEROBOT_MONITOR_NAME:-HuggingPost ${SPACE_HOST_CLEAN}}"
INTERVAL_MINUTES="${UPTIMEROBOT_INTERVAL:-5}"
# BUGFIX: the UptimeRobot v2 API expects `interval` in SECONDS (minimum 60),
# but UPTIMEROBOT_INTERVAL is documented in minutes — convert before sending.
# Previously the raw minute value (default 5) was sent, which the API rejects.
INTERVAL_SECONDS=$(( INTERVAL_MINUTES * 60 ))

echo "Checking existing UptimeRobot monitors for ${MONITOR_URL}..."
MONITORS_RESPONSE=$(curl -sS -X POST "${API_URL}/getMonitors" \
  -d "api_key=${API_KEY}" \
  -d "format=json" \
  -d "logs=0" \
  -d "response_times=0" \
  -d "response_times_limit=1")

# Look up an existing monitor by exact URL match; empty string if none.
MONITOR_ID=$(printf '%s' "$MONITORS_RESPONSE" | jq -r --arg url "$MONITOR_URL" '
  (.monitors // []) | map(select(.url == $url)) | first | .id // empty
')

if [ -n "$MONITOR_ID" ]; then
  echo "Monitor already exists (id=${MONITOR_ID}) for ${MONITOR_URL}"
  exit 0
fi

echo "Creating new UptimeRobot monitor for ${MONITOR_URL}..."

# Build curl arguments as an array so values with spaces stay intact.
CURL_ARGS=(
  -sS
  -X POST "${API_URL}/newMonitor"
  -d "api_key=${API_KEY}"
  -d "format=json"
  -d "type=1"
  -d "friendly_name=${MONITOR_NAME}"
  -d "url=${MONITOR_URL}"
  -d "interval=${INTERVAL_SECONDS}"
)

if [ -n "${UPTIMEROBOT_ALERT_CONTACTS:-}" ]; then
  CURL_ARGS+=(-d "alert_contacts=${UPTIMEROBOT_ALERT_CONTACTS}")
fi

CREATE_RESPONSE=$(curl "${CURL_ARGS[@]}")
CREATE_STATUS=$(printf '%s' "$CREATE_RESPONSE" | jq -r '.stat // "fail"')

if [ "$CREATE_STATUS" != "ok" ]; then
  echo "Failed to create monitor."
  printf '%s\n' "$CREATE_RESPONSE"
  exit 1
fi

NEW_ID=$(printf '%s' "$CREATE_RESPONSE" | jq -r '.monitor.id // empty')
echo "Created UptimeRobot monitor ${NEW_ID:-"(id unavailable)"} for ${MONITOR_URL}"
#!/bin/bash
# ============================================================================
# HuggingPost orchestrator
#
# Boot order:
#   1. Compute env (DB_URL, REDIS_URL, FRONTEND_URL, basePath-aware backend URL)
#   2. Persist or generate JWT_SECRET, DB password
#   3. Init Postgres data dir if empty, start postgres, create user + DB
#   4. Start Redis
#   5. Restore DB + uploads + secrets from HF Dataset (if HF_TOKEN set)
#   6. Background: HF Dataset sync loop
#   7. Background: health-server.js on port 7860
#   8. Background: nginx + PM2 (the 4 Postiz procs — same CMD as upstream)
#   9. Foreground: wait on the Postiz process
#  10. SIGTERM → final sync → graceful exit
# ============================================================================

set -euo pipefail
umask 0077

# ── Paths ────────────────────────────────────────────────────────────────────
POSTIZ_HOME="/postiz"
POSTIZ_DIR="/app"
PGDATA="${POSTIZ_HOME}/pgdata"
SECRETS_DIR="${POSTIZ_HOME}/.secrets"
JWT_SECRET_FILE="${SECRETS_DIR}/jwt-secret"
DB_PASSWORD_FILE="${SECRETS_DIR}/db-password"
# BUGFIX: create ${PGDATA} as well — the first-boot branch below runs
# `chown -R` on it before initdb, and chown on a missing directory aborts
# the script under `set -e`.
mkdir -p "${POSTIZ_HOME}/uploads" "${POSTIZ_HOME}/redis" "${SECRETS_DIR}" "${PGDATA}"

# ── Public URL ───────────────────────────────────────────────────────────────
if [ -n "${SPACE_HOST:-}" ]; then
  PUBLIC_URL="https://${SPACE_HOST}"
else
  PUBLIC_URL="${PUBLIC_URL:-http://localhost:7860}"
fi

# ── JWT_SECRET (persist across restarts) ─────────────────────────────────────
if [ -z "${JWT_SECRET:-}" ]; then
  if [ -f "${JWT_SECRET_FILE}" ]; then
    JWT_SECRET=$(cat "${JWT_SECRET_FILE}")
  else
    JWT_SECRET=$(openssl rand -base64 48 | tr -d '\n')
    printf '%s' "${JWT_SECRET}" > "${JWT_SECRET_FILE}"
    chmod 600 "${JWT_SECRET_FILE}"
  fi
  export JWT_SECRET
fi

# ── DB password (random hex, persisted) ──────────────────────────────────────
if [ -f "${DB_PASSWORD_FILE}" ]; then
  DB_PASSWORD=$(cat "${DB_PASSWORD_FILE}")
else
  DB_PASSWORD=$(openssl rand -hex 24)
  printf '%s' "${DB_PASSWORD}" > "${DB_PASSWORD_FILE}"
  chmod 600 "${DB_PASSWORD_FILE}"
fi
export PGPASSWORD="${DB_PASSWORD}"

# ── Postiz env (UI mounted at /app, API at /app/api) ────────────────────────
# basePath="/app" was patched into apps/frontend/next.config.js at build time,
# so Next.js generates URLs prefixed with /app. NEXT_PUBLIC_BACKEND_URL must
# include /app/api so frontend code calls the right path; health-server
# strips /app before passing to nginx :5000, which then routes /api → backend
# (port 3000) and /uploads → file system.
export DATABASE_URL="${DATABASE_URL:-postgresql://postiz:${DB_PASSWORD}@localhost:5432/postiz}"
export REDIS_URL="${REDIS_URL:-redis://localhost:6379}"
export FRONTEND_URL="${FRONTEND_URL:-${PUBLIC_URL}/app}"
export NEXT_PUBLIC_BACKEND_URL="${NEXT_PUBLIC_BACKEND_URL:-${PUBLIC_URL}/app/api}"
export BACKEND_INTERNAL_URL="${BACKEND_INTERNAL_URL:-http://localhost:3000}"
export STORAGE_PROVIDER="${STORAGE_PROVIDER:-local}"
export UPLOAD_DIRECTORY="${UPLOAD_DIRECTORY:-${POSTIZ_HOME}/uploads}"
export NEXT_PUBLIC_UPLOAD_STATIC_DIRECTORY="${NEXT_PUBLIC_UPLOAD_STATIC_DIRECTORY:-/app/uploads}"
export IS_GENERAL="${IS_GENERAL:-true}"
export NX_ADD_PLUGINS="${NX_ADD_PLUGINS:-false}"
export NODE_ENV="${NODE_ENV:-production}"

# Sync config
export SYNC_INTERVAL="${SYNC_INTERVAL:-300}"
export SYNC_MAX_FILE_BYTES="${SYNC_MAX_FILE_BYTES:-104857600}"
export BACKUP_DATASET_NAME="${BACKUP_DATASET_NAME:-huggingpost-backup}"

# ── Banner ───────────────────────────────────────────────────────────────────
echo ""
echo "  ╔════════════════════════════════════╗"
echo "  ║          HuggingPost               ║"
echo "  ║  Postiz on Hugging Face Spaces     ║"
echo "  ╚════════════════════════════════════╝"
echo ""
echo "Public host : ${SPACE_HOST:-not detected}"
echo "Dashboard   : ${PUBLIC_URL}/"
echo "Postiz UI   : ${PUBLIC_URL}/app/"
echo "Postiz API  : ${PUBLIC_URL}/app/api/"
echo "Sync every  : ${SYNC_INTERVAL}s"
# BUGFIX: the old `${HF_TOKEN:+enabled}${HF_TOKEN:-...}` expansion printed
# the raw HF_TOKEN secret into the Space logs whenever it was set.
if [ -n "${HF_TOKEN:-}" ]; then
  echo "HF backup   : enabled"
else
  echo "HF backup   : disabled (no HF_TOKEN)"
fi
echo ""

# ── Postgres ─────────────────────────────────────────────────────────────────
PG_BIN="/usr/libexec/postgresql16"
[ -x "${PG_BIN}/postgres" ] || PG_BIN="/usr/bin"

if [ ! -f "${PGDATA}/PG_VERSION" ]; then
  echo "Initializing Postgres cluster at ${PGDATA}..."
  chown -R postgres:postgres "${PGDATA}"
  su-exec postgres "${PG_BIN}/initdb" -D "${PGDATA}" --locale=C.UTF-8 --encoding=UTF8 >/dev/null
  echo "host all all 127.0.0.1/32 scram-sha-256" >> "${PGDATA}/pg_hba.conf"
fi

chown -R postgres:postgres "${PGDATA}"

if ! su-exec postgres "${PG_BIN}/pg_ctl" -D "${PGDATA}" status >/dev/null 2>&1; then
  echo "Starting Postgres..."
  su-exec postgres "${PG_BIN}/pg_ctl" -D "${PGDATA}" \
    -l "${POSTIZ_HOME}/pg.log" \
    -o "-c listen_addresses='127.0.0.1' -c unix_socket_directories='/var/run/postgresql'" \
    start >/dev/null
fi

for _ in $(seq 1 30); do
  su-exec postgres pg_isready -h 127.0.0.1 >/dev/null 2>&1 && break
  sleep 1
done

# Idempotent role + database creation; password is always realigned with
# the persisted secret so a restored secrets dir wins over a stale cluster.
su-exec postgres psql -tAc "SELECT 1 FROM pg_roles WHERE rolname='postiz'" | grep -q 1 \
  || su-exec postgres psql -c "CREATE ROLE postiz WITH LOGIN PASSWORD '${DB_PASSWORD}';" >/dev/null
su-exec postgres psql -c "ALTER ROLE postiz WITH PASSWORD '${DB_PASSWORD}';" >/dev/null
su-exec postgres psql -tAc "SELECT 1 FROM pg_database WHERE datname='postiz'" | grep -q 1 \
  || su-exec postgres psql -c "CREATE DATABASE postiz OWNER postiz;" >/dev/null

echo "Postgres ready"

# ── Redis ────────────────────────────────────────────────────────────────────
echo "Starting Redis..."
redis-server --daemonize yes \
  --bind 127.0.0.1 \
  --port 6379 \
  --appendonly yes \
  --dir "${POSTIZ_HOME}/redis" \
  --logfile /tmp/redis.log

for _ in $(seq 1 10); do
  redis-cli -h 127.0.0.1 -p 6379 ping 2>/dev/null | grep -q PONG && break
  sleep 1
done
echo "Redis ready"

# ── Restore from HF Dataset ──────────────────────────────────────────────────
if [ -n "${HF_TOKEN:-}" ]; then
  echo "Restoring persisted data from HF Dataset..."
  python3 /opt/postiz-sync.py restore 2>&1 || true
  # The restore may have replaced the DB password secret; re-read it and
  # realign both the connection string and the live role password.
  if [ -f "${DB_PASSWORD_FILE}" ]; then
    DB_PASSWORD=$(cat "${DB_PASSWORD_FILE}")
    export PGPASSWORD="${DB_PASSWORD}"
    export DATABASE_URL="postgresql://postiz:${DB_PASSWORD}@localhost:5432/postiz"
  fi
  su-exec postgres psql -c "ALTER ROLE postiz WITH PASSWORD '${DB_PASSWORD}';" >/dev/null 2>&1 || true
else
  echo "HF_TOKEN not set — running without backup persistence"
  echo "  Add HF_TOKEN as a Space secret to enable DB+uploads backup."
fi

# ── Cloudflare proxy bootstrap ───────────────────────────────────────────────
if [ -n "${CLOUDFLARE_WORKERS_TOKEN:-}" ]; then
  echo "Setting up Cloudflare proxy..."
  python3 /opt/cloudflare-proxy-setup.py 2>&1 || echo "Cloudflare setup failed; continuing without proxy"
fi

_CF_ENV="/tmp/huggingpost-cloudflare-proxy.env"
if [ -f "${_CF_ENV}" ]; then
  # shellcheck source=/dev/null
  . "${_CF_ENV}"
fi

if [ -n "${CLOUDFLARE_PROXY_URL:-}" ] && [ -f /opt/cloudflare-proxy.js ]; then
  export NODE_OPTIONS="${NODE_OPTIONS:-} --require /opt/cloudflare-proxy.js"
fi

# ── Background HF sync loop ──────────────────────────────────────────────────
SYNC_PID=""
if [ -n "${HF_TOKEN:-}" ]; then
  (
    while true; do
      sleep "$SYNC_INTERVAL"
      python3 /opt/postiz-sync.py sync 2>&1 || true
    done
  ) &
  SYNC_PID=$!
fi

# ── Health server (public port 7860) ─────────────────────────────────────────
node /opt/healthsrv/health-server.js &
HEALTH_PID=$!
sleep 1

# ── Postiz: nginx + PM2 (mirrors upstream CMD `nginx && pnpm run pm2`) ───────
# pm2-run script does: pm2 delete all || true && pnpm run prisma-db-push
#   && pnpm run --parallel pm2 && pm2 logs
echo "Starting nginx + Postiz PM2 procs..."
cd "${POSTIZ_DIR}"
( nginx && pnpm run pm2 2>&1 | sed 's/^/[postiz] /' ) &
POSTIZ_PID=$!

echo "Waiting for nginx (port 5000)..."
for i in $(seq 1 90); do
  if curl -sf -m 2 http://127.0.0.1:5000/ >/dev/null 2>&1; then
    echo "Postiz ready (~$((i*2))s)"
    break
  fi
  sleep 2
done

echo ""
echo " ┌─────────────────────────────────────────────────────┐"
echo " │ HuggingPost is live!                                │"
echo " │                                                     │"
echo " │ Dashboard : ${PUBLIC_URL}/"
echo " │ Postiz    : ${PUBLIC_URL}/app/"
echo " │                                                     │"
echo " │ Sign up to create the first admin account.          │"
echo " └─────────────────────────────────────────────────────┘"
echo ""

# ── Graceful shutdown ────────────────────────────────────────────────────────
cleanup() {
  echo "Shutting down — running final sync..."
  [ -n "${HEALTH_PID:-}" ] && kill "$HEALTH_PID" 2>/dev/null || true
  [ -n "${POSTIZ_PID:-}" ] && kill "$POSTIZ_PID" 2>/dev/null || true
  pm2 kill >/dev/null 2>&1 || true
  nginx -s quit 2>/dev/null || true

  if [ -n "${SYNC_PID:-}" ]; then
    kill "$SYNC_PID" 2>/dev/null || true
    wait "$SYNC_PID" 2>/dev/null || true
  fi

  # Final sync runs after Postiz is stopped so the dump is quiescent.
  if [ -n "${HF_TOKEN:-}" ]; then
    python3 /opt/postiz-sync.py sync 2>&1 || true
  fi

  redis-cli -h 127.0.0.1 -p 6379 shutdown nosave 2>/dev/null || true
  su-exec postgres "${PG_BIN}/pg_ctl" -D "${PGDATA}" stop -m fast 2>/dev/null || true
  exit 0
}
trap cleanup SIGTERM SIGINT

wait "$POSTIZ_PID"