import OpenAI from "openai";
import { ModelConfig } from "../types/schema";
import { ChatCompletionMessageParam } from "openai/resources/chat/completions.mjs";


/**
 * Performs a single, non-streaming chat completion request.
 *
 * @param modelConfig - Model connection settings: `apiKey`, `baseUrl`, `model`,
 *   plus optional extra request `parameters` (e.g. temperature).
 * @param messages - Conversation history in OpenAI chat-message format.
 * @returns The content of the first choice's assistant message, or `''` when absent.
 * @throws Error when `apiKey`, `baseUrl`, or `model` is missing.
 */
export const llmOnceCall = async (
  modelConfig: ModelConfig,
  messages: ChatCompletionMessageParam[],
): Promise<string> => {
  if (!modelConfig?.apiKey || !modelConfig?.baseUrl || !modelConfig?.model) {
    throw new Error('模型未指定');
  }

  const client = new OpenAI({
    apiKey: modelConfig.apiKey,
    baseURL: modelConfig.baseUrl,
  });

  const res = await client.chat.completions.create({
    // Spread caller-supplied parameters FIRST so they cannot clobber
    // model/messages, and cannot accidentally turn on streaming — a
    // streamed response has no `choices` array and would break the
    // access below.
    ...modelConfig.parameters,
    model: modelConfig.model,
    messages,
    stream: false,
  });
  // `content` is string | null; `??` keeps a legitimate empty string intact.
  return res.choices[0]?.message?.content ?? '';
};

/**
 * Performs a single streaming chat completion request.
 *
 * @param modelConfig - Model connection settings: `apiKey`, `baseUrl`, `model`,
 *   plus optional extra request `parameters` (e.g. temperature).
 * @param messages - Conversation history in OpenAI chat-message format.
 * @returns A stream of chat completion chunks from the OpenAI SDK.
 * @throws Error when `apiKey`, `baseUrl`, or `model` is missing.
 */
export async function llmOnceStreamingCall(
  modelConfig: ModelConfig,
  messages: ChatCompletionMessageParam[],
) {
  if (!modelConfig?.apiKey || !modelConfig?.baseUrl || !modelConfig?.model) {
    throw new Error('模型未指定');
  }

  const client = new OpenAI({
    apiKey: modelConfig.apiKey,
    baseURL: modelConfig.baseUrl,
  });

  return await client.chat.completions.create({
    // Spread caller-supplied parameters FIRST: previously they were spread
    // after `stream: true`, so a `stream: false` in parameters could
    // silently disable streaming and break callers iterating the result.
    ...modelConfig.parameters,
    model: modelConfig.model,
    messages,
    stream: true,
  });
}