import fs from "fs"; import { Request, Response } from "express"; import showdown from "showdown"; import { config, listConfig } from "./config"; import { keyPool } from "./key-management"; import { getUniqueIps } from "./proxy/rate-limit"; import { QueuePartition, getEstimatedWaitTime, getQueueLength, } from "./proxy/queue"; const INFO_PAGE_TTL = 5000; let infoPageHtml: string | undefined; let infoPageLastUpdated = 0; export const handleInfoPage = (req: Request, res: Response) => { if (infoPageLastUpdated + INFO_PAGE_TTL > Date.now()) { res.send(infoPageHtml); return; } // Sometimes huggingface doesn't send the host header and makes us guess. const baseUrl = process.env.SPACE_ID && !req.get("host")?.includes("hf.space") ? getExternalUrlForHuggingfaceSpaceId(process.env.SPACE_ID) : req.protocol + "://" + req.get("host"); res.send(cacheInfoPageHtml(baseUrl)); }; function cacheInfoPageHtml(baseUrl: string) { const keys = keyPool.list(); const openaiKeys = keys.filter((k) => k.service === "openai").length; const anthropicKeys = keys.filter((k) => k.service === "anthropic").length; const info = { uptime: process.uptime(), endpoints: { ...(openaiKeys ? { openai: baseUrl + "/proxy/openai" } : {}), ...(anthropicKeys ? { anthropic: baseUrl + "/proxy/anthropic" } : {}), }, proompts: keys.reduce((acc, k) => acc + k.promptCount, 0), ...(config.modelRateLimit ? { proomptersNow: getUniqueIps() } : {}), openaiKeys, anthropicKeys, ...(openaiKeys ? getOpenAIInfo() : {}), ...(anthropicKeys ? getAnthropicInfo() : {}), config: listConfig(), build: process.env.BUILD_INFO || "dev", }; const title = getServerTitle(); const headerHtml = buildInfoPageHeader(new showdown.Converter(), title); const pageBody = ` ${title} ${headerHtml}

Service Info

${JSON.stringify(info, null, 2)}
`; infoPageHtml = pageBody; infoPageLastUpdated = Date.now(); return pageBody; } type ServiceInfo = { activeKeys: number; trialKeys?: number; quota: string; proomptersInQueue: number; estimatedQueueTime: string; }; // this has long since outgrown this awful "dump everything in a
 tag" approach
// but I really don't want to spend time on a proper UI for this right now

function getOpenAIInfo() {
  const info: { [model: string]: Partial } = {};
  const keys = keyPool.list().filter((k) => k.service === "openai");
  const hasGpt4 = keys.some((k) => k.isGpt4);

  if (keyPool.anyUnchecked()) {
    const uncheckedKeys = keys.filter((k) => !k.lastChecked);
    info.status = `Still checking ${uncheckedKeys.length} keys...` as any;
  } else {
    delete info.status;
  }

  if (config.checkKeys) {
    const turboKeys = keys.filter((k) => !k.isGpt4 && !k.isDisabled);
    const gpt4Keys = keys.filter((k) => k.isGpt4 && !k.isDisabled);

    const quota: Record = { turbo: "", gpt4: "" };
    const turboQuota = keyPool.remainingQuota("openai") * 100;
    const gpt4Quota = keyPool.remainingQuota("openai", { gpt4: true }) * 100;

    if (config.quotaDisplayMode === "full") {
      const turboUsage = keyPool.usageInUsd("openai");
      const gpt4Usage = keyPool.usageInUsd("openai", { gpt4: true });
      quota.turbo = `${turboUsage} (${Math.round(turboQuota)}% remaining)`;
      quota.gpt4 = `${gpt4Usage} (${Math.round(gpt4Quota)}% remaining)`;
    } else {
      quota.turbo = `${Math.round(turboQuota)}%`;
      quota.gpt4 = `${Math.round(gpt4Quota * 100)}%`;
    }

    info.turbo = {
      activeKeys: turboKeys.filter((k) => !k.isDisabled).length,
      trialKeys: turboKeys.filter((k) => k.isTrial).length,
      quota: quota.turbo,
    };

    if (hasGpt4 && true === false) {
      info.gpt4 = {
        activeKeys: gpt4Keys.filter((k) => !k.isDisabled).length,
        trialKeys: gpt4Keys.filter((k) => k.isTrial).length,
        quota: quota.gpt4,
      };
    }

    if (config.quotaDisplayMode === "none") {
      delete info.turbo?.quota;
      delete info.gpt4?.quota;
    }

    delete info.gpt4?.quota;
  } else {
    info.status = "Key checking is disabled." as any;
    info.turbo = { activeKeys: keys.filter((k) => !k.isDisabled).length };
  }

  if (config.queueMode !== "none") {
    const turboQueue = getQueueInformation("turbo");

    info.turbo.proomptersInQueue = turboQueue.proomptersInQueue;
    info.turbo.estimatedQueueTime = turboQueue.estimatedQueueTime;

    if (hasGpt4 && true === false) {
      const gpt4Queue = getQueueInformation("gpt-4");
      info.gpt4.proomptersInQueue = gpt4Queue.proomptersInQueue;
      info.gpt4.estimatedQueueTime = gpt4Queue.estimatedQueueTime;
    }
  }

  return info;
}

function getAnthropicInfo() {
  const claudeInfo: Partial = {};
  const keys = keyPool.list().filter((k) => k.service === "anthropic");
  claudeInfo.activeKeys = keys.filter((k) => !k.isDisabled).length;
  if (config.queueMode !== "none") {
    const queue = getQueueInformation("claude");
    claudeInfo.proomptersInQueue = queue.proomptersInQueue;
    claudeInfo.estimatedQueueTime = queue.estimatedQueueTime;
  }
  return { claude: claudeInfo };
}

/**
 * Builds the markdown-rendered header of the info page: title, prompt-logging
 * disclosure (when enabled), estimated wait times (when queueing is enabled),
 * and the operator's custom greeting.
 *
 * If the server operator provides a `greeting.md` file, it will be included in
 * the rendered info page.
 **/
function buildInfoPageHeader(converter: showdown.Converter, title: string) {
  const customGreeting = fs.existsSync("greeting.md")
    ? fs.readFileSync("greeting.md", "utf8")
    : null;

  // TODO: use some templating engine instead of this mess

  let infoBody = `
# ${title}`;
  // BUG FIX: condition was `config.promptLogging && true === false`, which is
  // always false — the logging disclosure never rendered even when prompt
  // logging was actually enabled. Users must be told their prompts are saved.
  if (config.promptLogging) {
    infoBody += `\n## Prompt logging is enabled!
The server operator has enabled prompt logging. The prompts you send to this proxy and the AI responses you receive may be saved.

Logs are anonymous and do not contain IP addresses or timestamps. [You can see the type of data logged here, along with the rest of the code.](https://gitgud.io/khanon/oai-reverse-proxy/-/blob/main/src/prompt-logging/index.ts).

**If you are uncomfortable with this, don't send prompts to this proxy!**`;
  }

  if (config.queueMode !== "none") {
    const waits = [];
    // FIX: user-facing string read "your prompt will processed".
    infoBody += `\n## Estimated Wait Times\nIf the AI is busy, your prompt will be processed when a slot frees up.`;

    if (config.openaiKey) {
      const turboWait = getQueueInformation("turbo").estimatedQueueTime;
      const gpt4Wait = getQueueInformation("gpt-4").estimatedQueueTime;
      waits.push(`**Turbo:** ${turboWait}`);
      if (keyPool.list().some((k) => k.isGpt4)) {
        waits.push(`**GPT-4:** ${gpt4Wait}`);
      }
    }

    if (config.anthropicKey) {
      const claudeWait = getQueueInformation("claude").estimatedQueueTime;
      waits.push(`**Claude:** ${claudeWait}`);
    }
    infoBody += "\n\n" + waits.join(" / ");
  }

  if (customGreeting) {
    infoBody += `\n## Server Greeting\n
${customGreeting}`;
  }
  return converter.makeHtml(infoBody);
}

/**
 * Returns queue stats for a partition: queue length and a human-readable
 * wait estimate (seconds, or minutes + seconds if over a minute). Returns
 * an empty object when queueing is disabled.
 */
function getQueueInformation(partition: QueuePartition) {
  if (config.queueMode === "none") {
    return {};
  }
  const waitMs = getEstimatedWaitTime(partition);

  let waitTime: string;
  if (waitMs < 60000) {
    waitTime = `${Math.round(waitMs / 1000)}sec`;
  } else {
    const minutes = Math.round(waitMs / 60000);
    const seconds = Math.round((waitMs % 60000) / 1000);
    waitTime = `${minutes}min, ${seconds}sec`;
  }

  return {
    proomptersInQueue: getQueueLength(partition),
    // Waits of 2 seconds or less are not worth reporting.
    estimatedQueueTime: waitMs > 2000 ? waitTime : "no wait",
  };
}

/**
 * Picks a display title for the info page. An explicitly configured
 * SERVER_TITLE always wins; otherwise the hosting platform (Huggingface
 * Spaces, Render) supplies one; otherwise a generic default is used.
 */
function getServerTitle() {
  const env = process.env;

  // Use manually set title if available
  if (env.SERVER_TITLE) return env.SERVER_TITLE;

  // Huggingface
  if (env.SPACE_ID) return `${env.SPACE_AUTHOR_NAME} / ${env.SPACE_TITLE}`;

  // Render
  if (env.RENDER) return `Render / ${env.RENDER_SERVICE_NAME}`;

  return "OAI Reverse Proxy";
}

/**
 * Derives the public URL for a Huggingface Space from its "owner/name" id.
 * Huggingface broke their amazon elb config and no longer sends the
 * x-forwarded-host header, so we reconstruct the URL ourselves. Returns ""
 * if the id cannot be parsed.
 */
function getExternalUrlForHuggingfaceSpaceId(spaceId: string) {
  try {
    const parts = spaceId.split("/");
    const owner = parts[0];
    // Space subdomains use hyphens where the space name has underscores.
    const name = parts[1].replace(/_/g, "-");
    return `https://${owner}-${name}.hf.space`;
  } catch (e) {
    return "";
  }
}