ai-comic-factory/src/app/queries/predictWithOpenAI.ts
"use server"
import type { ChatCompletionMessageParam } from "openai/resources/chat"
import OpenAI from "openai"
export async function predict(inputs: string, nbPanels: number): Promise<string> {
  // Configuration is read from environment variables, falling back to the public OpenAI API.
  const openaiApiKey = `${process.env.AUTH_OPENAI_API_KEY || ""}`
  const openaiApiBaseUrl = `${process.env.LLM_OPENAI_API_BASE_URL || "https://api.openai.com/v1"}`
  const openaiApiModel = `${process.env.LLM_OPENAI_API_MODEL || "gpt-3.5-turbo"}`

  // The base URL override makes it possible to target any OpenAI-compatible endpoint.
  const openai = new OpenAI({
    apiKey: openaiApiKey,
    baseURL: openaiApiBaseUrl,
  })

  // The whole prompt is passed as a single system message.
  const messages: ChatCompletionMessageParam[] = [
    { role: "system", content: inputs },
  ]
  try {
    const res = await openai.chat.completions.create({
      messages,
      stream: false,
      model: openaiApiModel,
      temperature: 0.8,
      // TODO: use the nbPanels to define a max token limit
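      // One possible sketch (the ~150-token-per-panel budget is an assumption,
      // not a value from the original code):
      // max_tokens: 200 + nbPanels * 150,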
    })

    // content can be null, so normalize it to an empty string
    return res.choices[0]?.message?.content || ""
  } catch (err) {
    console.error(`error during generation: ${err}`)
    return ""
  }
}
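
// Minimal usage sketch from another server-side caller (the prompt string and
// panel count below are illustrative, not part of the original code):
//
//   const panels = await predict(
//     "You are a comic book author. Write a story in 2 panels.",
//     2
//   )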