|
import express from 'express';
import { fal } from '@fal-ai/client';

// OpenAI-compatible proxy in front of fal-ai's "any-llm" endpoint: it exposes
// GET /v1/models and POST /v1/chat/completions and translates each chat
// request into the { system_prompt, prompt } input that fal expects.
const app = express();
app.use(express.json({ limit: '50mb' }));
app.use(express.urlencoded({ extended: true, limit: '50mb' }));
|
const PORT = process.env.PORT || 3000;

// Character budgets for the prompt fields sent to fal-ai/any-llm. 4800 is this
// proxy's own conservative cap rather than a documented fal-ai limit.
const PROMPT_LIMIT = 4800;
const SYSTEM_PROMPT_LIMIT = 4800;
|
const FAL_SUPPORTED_MODELS = [
  "anthropic/claude-3.7-sonnet",
  "anthropic/claude-3.5-sonnet",
  "anthropic/claude-3-5-haiku",
  "anthropic/claude-3-haiku",
  "google/gemini-pro-1.5",
  "google/gemini-flash-1.5",
  "google/gemini-flash-1.5-8b",
  "google/gemini-2.0-flash-001",
  "meta-llama/llama-3.2-1b-instruct",
  "meta-llama/llama-3.2-3b-instruct",
  "meta-llama/llama-3.1-8b-instruct",
  "meta-llama/llama-3.1-70b-instruct",
  "openai/gpt-4o-mini",
  "openai/gpt-4o",
  "deepseek/deepseek-r1",
  "meta-llama/llama-4-maverick",
  "meta-llama/llama-4-scout"
];
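// The list above is advisory: /v1/chat/completions only logs a warning for
// model ids that are not listed here and still forwards the request to fal.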
|
|
|
|
|
// Derive the "owned_by" field for /v1/models from the model id prefix.
const getOwner = (modelId) => {
  if (modelId && modelId.includes('/')) {
    return modelId.split('/')[0];
  }
  return 'fal-ai';
};
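// e.g. getOwner("openai/gpt-4o-mini") === "openai"; ids without a "/" fall back to "fal-ai".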
|
|
|
|
|
app.get('/v1/models', (req, res) => {
  console.log("Received request for GET /v1/models");
  try {
    const modelsData = FAL_SUPPORTED_MODELS.map(modelId => ({
      id: modelId, object: "model", created: 1700000000, owned_by: getOwner(modelId)
    }));
    res.json({ object: "list", data: modelsData });
    console.log("Successfully returned model list.");
  } catch (error) {
    console.error("Error processing GET /v1/models:", error);
    res.status(500).json({ error: "Failed to retrieve model list." });
  }
});
|
// Flatten an OpenAI-style messages array into the { system_prompt, prompt } pair
// expected by fal-ai/any-llm. Strategy: the system message plus older history go
// into system_prompt (keeping the most recent older turns that fit the budget),
// while the latest turns become the prompt itself. Note: the chat route below
// currently uses the simpler convertMessagesToFalPrompt1 instead.
function convertMessagesToFalPrompt(messages) {
  const filtered_messages = [];
  let system_message_content = "";

  // Keep only non-empty messages; the last system message wins.
  for (const message of messages) {
    const content = (message.content === null || message.content === undefined) ? "" : String(message.content).trim();
    if (content.length > 0) {
      if (message.role === 'system') {
        system_message_content = content;
      } else {
        filtered_messages.push({ ...message, content: content });
      }
    }
  }

  if (system_message_content.length > SYSTEM_PROMPT_LIMIT) {
    system_message_content = system_message_content.substring(0, SYSTEM_PROMPT_LIMIT);
  }

  if (filtered_messages.length === 0) {
    return { system_prompt: system_message_content, prompt: "" };
  }

  // The three most recent turns form the prompt; everything older is pushed
  // into the system prompt as labelled history.
  const prompt_messages = filtered_messages.slice(-3);
  const remaining_messages = filtered_messages.slice(0, -3);

  const prompt_parts = [];
  for (const message of prompt_messages) {
    if (message.role === 'user') {
      // Recent user turns are left unlabelled so the final user turn reads as
      // the direct prompt; only assistant turns are prefixed here.
      prompt_parts.push(String(message.content));
    } else if (message.role === 'assistant') {
      prompt_parts.push(`Assistant: ${String(message.content)}`);
    }
  }
  let final_prompt = prompt_parts.join('\n');

  // Keep the prompt within PROMPT_LIMIT, preferring the most recent characters.
  if (final_prompt.length > PROMPT_LIMIT) {
    final_prompt = final_prompt.substring(final_prompt.length - PROMPT_LIMIT);
  }

  const system_prompt_parts = [];
  if (system_message_content.length > 0) {
    system_prompt_parts.push(system_message_content);
  }
  for (const message of remaining_messages) {
    if (message.role === 'user') {
      system_prompt_parts.push(`Human: ${String(message.content)}`);
    } else if (message.role === 'assistant') {
      system_prompt_parts.push(`Assistant: ${String(message.content)}`);
    }
  }
  let final_system_prompt = system_prompt_parts.join('\n');

  // If the combined system prompt is over budget, keep the system message and
  // refill with as much of the most recent older history as still fits.
  if (final_system_prompt.length > SYSTEM_PROMPT_LIMIT) {
    const system_part = system_message_content;
    let remaining_space = SYSTEM_PROMPT_LIMIT - system_part.length - 1;

    if (remaining_space <= 0) {
      final_system_prompt = system_part;
    } else {
      const conversation_parts = [];
      for (let i = remaining_messages.length - 1; i >= 0; i--) {
        const message = remaining_messages[i];
        let message_text = "";

        if (message.role === 'user') {
          message_text = `Human: ${String(message.content)}`;
        } else if (message.role === 'assistant') {
          message_text = `Assistant: ${String(message.content)}`;
        }

        if (message_text.length + 1 <= remaining_space) {
          conversation_parts.unshift(message_text);
          remaining_space -= (message_text.length + 1);
        } else {
          break;
        }
      }

      if (system_part.length > 0 && conversation_parts.length > 0) {
        final_system_prompt = system_part + '\n' + conversation_parts.join('\n');
      } else if (system_part.length > 0) {
        final_system_prompt = system_part;
      } else {
        final_system_prompt = conversation_parts.join('\n');
      }
    }
  }

  return { system_prompt: final_system_prompt, prompt: final_prompt };
}
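// Example of the flattening above (illustrative values):
//   [ { role: 'system', content: 'Be terse.' },
//     { role: 'user', content: 'Hi' },
//     { role: 'assistant', content: 'Hello!' },
//     { role: 'user', content: 'Name three colors.' } ]
// becomes
//   system_prompt: "Be terse."
//   prompt:        "Hi\nAssistant: Hello!\nName three colors."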
|
|
|
// Simpler conversion currently used by the chat completions route: forward only
// the system message and the most recent user message, dropping older history.
function convertMessagesToFalPrompt1(messages) {
  let system_message_content = "";
  let prompt = "";

  for (const message of messages) {
    const content = (message.content === null || message.content === undefined) ? "" : String(message.content).trim();
    if (content.length > 0) {
      if (message.role === 'system') {
        system_message_content = content;
      } else if (message.role === 'user') {
        prompt = content;
      }
    }
  }

  return { system_prompt: system_message_content, prompt: prompt };
}
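// For the same example conversation as above, convertMessagesToFalPrompt1 yields
//   system_prompt: "Be terse."
//   prompt:        "Name three colors."
// i.e. only the system message and the latest user turn survive.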
|
|
|
app.post('/v1/chat/completions', async (req, res) => {
  // Extract the fal credentials from the incoming request. Both the standard
  // Authorization header and a custom x-app-token header are accepted.
  let authKey = null;
  let authHeader = req.headers.authorization;
  if (!authHeader) {
    authHeader = req.headers["x-app-token"];
  }
  if (authHeader) {
    const parts = authHeader.split(' ');
    if (parts.length === 2) {
      const scheme = parts[0];
      const credentials = parts[1];

      if (scheme === 'Bearer') {
        authKey = credentials;
      } else if (scheme === 'Basic') {
        // fal credentials use the "key_id:key_secret" form, so the decoded
        // Basic pair is forwarded as-is.
        const decoded = Buffer.from(credentials, 'base64').toString('utf8');
        const [username, password] = decoded.split(':');
        req.auth = { username, password };
        authKey = decoded;
      } else if (scheme === 'ApiKey' || scheme === 'Key') {
        authKey = credentials;
      }
    } else if (parts.length === 1) {
      // A bare key with no scheme (e.g. sent via x-app-token).
      authKey = parts[0];
    }
  }

  // Fall back to the FAL_KEY environment variable when the request carries no key.
  fal.config({
    credentials: authKey || process.env.FAL_KEY,
  });

  const { model, messages, stream = false, reasoning = false, ...restOpenAIParams } = req.body;

  console.log(`Received chat completion request for model: ${model}, stream: ${stream}`);

  if (!model || !messages || !Array.isArray(messages) || messages.length === 0) {
    console.error("Invalid request parameters:", { model, messages: Array.isArray(messages) ? messages.length : typeof messages });
    return res.status(400).json({ error: 'Missing or invalid parameters: model and messages array are required.' });
  }
  if (!FAL_SUPPORTED_MODELS.includes(model)) {
    console.warn(`Warning: Requested model '${model}' is not in the explicitly supported list.`);
  }

  try {
    const { prompt, system_prompt } = convertMessagesToFalPrompt1(messages);

    const falInput = {
      model: model,
      prompt: prompt,
      ...(system_prompt && { system_prompt: system_prompt }),
      reasoning: !!reasoning,
    };
    console.log("Fal Input:", JSON.stringify(falInput, null, 2));
    console.log("Forwarding request to fal-ai:");
    console.log("System Prompt Length:", system_prompt?.length || 0);
    console.log("Prompt Length:", prompt?.length || 0);

    console.log("--- System Prompt Start ---");
    console.log(system_prompt);
    console.log("--- System Prompt End ---");
    console.log("--- Prompt Start ---");
    console.log(prompt);
    console.log("--- Prompt End ---");

    if (stream) {
      // Stream the response back as OpenAI-style server-sent events.
      res.setHeader('Content-Type', 'text/event-stream; charset=utf-8');
      res.setHeader('Cache-Control', 'no-cache');
      res.setHeader('Connection', 'keep-alive');
      res.setHeader('Access-Control-Allow-Origin', '*');
      res.flushHeaders();

      let previousOutput = '';

      const falStream = await fal.stream("fal-ai/any-llm", { input: falInput });

      try {
        for await (const event of falStream) {
          const currentOutput = (event && typeof event.output === 'string') ? event.output : '';
          const isPartial = (event && typeof event.partial === 'boolean') ? event.partial : true;
          const errorInfo = (event && event.error) ? event.error : null;

          if (errorInfo) {
            console.error("Error received in fal stream event:", errorInfo);
            const errorChunk = { id: `chatcmpl-${Date.now()}-error`, object: "chat.completion.chunk", created: Math.floor(Date.now() / 1000), model: model, choices: [{ index: 0, delta: {}, finish_reason: "error", message: { role: 'assistant', content: `Fal Stream Error: ${JSON.stringify(errorInfo)}` } }] };
            res.write(`data: ${JSON.stringify(errorChunk)}\n\n`);
            break;
          }

          // fal streams the full output so far; emit only the new suffix as the delta.
          let deltaContent = '';
          if (currentOutput.startsWith(previousOutput)) {
            deltaContent = currentOutput.substring(previousOutput.length);
          } else if (currentOutput.length > 0) {
            console.warn("Fal stream output mismatch detected. Sending full current output as delta.", { previousLength: previousOutput.length, currentLength: currentOutput.length });
            deltaContent = currentOutput;
          }
          previousOutput = currentOutput;

          if (deltaContent || !isPartial) {
            const openAIChunk = { id: `chatcmpl-${Date.now()}`, object: "chat.completion.chunk", created: Math.floor(Date.now() / 1000), model: model, choices: [{ index: 0, delta: { content: deltaContent }, finish_reason: isPartial === false ? "stop" : null }] };
            res.write(`data: ${JSON.stringify(openAIChunk)}\n\n`);
          }
        }
        res.write(`data: [DONE]\n\n`);
        res.end();
        console.log("Stream finished.");

      } catch (streamError) {
        console.error('Error during fal stream processing loop:', streamError);
        try {
          const errorDetails = (streamError instanceof Error) ? streamError.message : JSON.stringify(streamError);
          res.write(`data: ${JSON.stringify({ error: { message: "Stream processing error", type: "proxy_error", details: errorDetails } })}\n\n`);
          res.write(`data: [DONE]\n\n`);
          res.end();
        } catch (finalError) {
          console.error('Error sending stream error message to client:', finalError);
          if (!res.writableEnded) { res.end(); }
        }
      }
    } else {
      // Non-streaming: wait for the full result and wrap it as a single chat completion.
      console.log("Executing non-stream request...");
      const result = await fal.subscribe("fal-ai/any-llm", { input: falInput, logs: true });
      console.log("Received non-stream result from fal-ai:", JSON.stringify(result, null, 2));

      // Depending on the @fal-ai/client version, the model output may be returned
      // directly or wrapped in a `data` field; handle both shapes.
      const payload = (result && result.data) ? result.data : (result || {});

      if (payload.error) {
        console.error("Fal-ai returned an error in non-stream mode:", payload.error);
        return res.status(500).json({ object: "error", message: `Fal-ai error: ${JSON.stringify(payload.error)}`, type: "fal_ai_error", param: null, code: null });
      }

      const openAIResponse = {
        id: `chatcmpl-${(result && result.requestId) || Date.now()}`, object: "chat.completion", created: Math.floor(Date.now() / 1000), model: model,
        choices: [{ index: 0, message: { role: "assistant", content: payload.output || "" }, finish_reason: "stop" }],
        usage: { prompt_tokens: null, completion_tokens: null, total_tokens: null }, system_fingerprint: null,
        ...(payload.reasoning && { fal_reasoning: payload.reasoning }),
      };
      res.json(openAIResponse);
      console.log("Returned non-stream response.");
    }

  } catch (error) {
    console.error('Unhandled error in /v1/chat/completions:', error);
    if (!res.headersSent) {
      const errorMessage = (error instanceof Error) ? error.message : JSON.stringify(error);
      res.status(500).json({ error: 'Internal Server Error in Proxy', details: errorMessage });
    } else if (!res.writableEnded) {
      console.error("Headers already sent, ending response.");
      res.end();
    }
  }
});
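/*
 * Example usage (illustrative; assumes the server runs on the default port and
 * YOUR_FAL_KEY is a valid fal.ai key in "key_id:key_secret" form):
 *
 *   curl http://localhost:3000/v1/models
 *
 *   curl http://localhost:3000/v1/chat/completions \
 *     -H "Content-Type: application/json" \
 *     -H "Authorization: Bearer YOUR_FAL_KEY" \
 *     -d '{
 *       "model": "openai/gpt-4o-mini",
 *       "messages": [{"role": "user", "content": "Hello!"}],
 *       "stream": false
 *     }'
 */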
|
|
|
|
|
app.listen(PORT, () => {
  console.log(`===================================================`);
  console.log(` Fal OpenAI Proxy Server (System Top + Separator + Recency)`);
  console.log(` Listening on port: ${PORT}`);
  console.log(` Using Limits: System Prompt=${SYSTEM_PROMPT_LIMIT}, Prompt=${PROMPT_LIMIT}`);
  console.log(` Chat Completions Endpoint: POST http://localhost:${PORT}/v1/chat/completions`);
  console.log(` Models Endpoint: GET http://localhost:${PORT}/v1/models`);
  console.log(`===================================================`);
});
|
|
|
|
|
app.get('/', (req, res) => {
  res.send('Fal OpenAI Proxy (System Top + Separator + Recency Strategy) is running.');
});
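// Running locally (illustrative; assumes the file is saved as an ES module,
// e.g. server.mjs, or that package.json sets "type": "module"):
//   FAL_KEY=key_id:key_secret node server.mjs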
|
|