import { ChatOllama } from '@langchain/ollama'
import { ChatOpenAI } from '@langchain/openai'
import { IModelConfig } from '../../configs/runtimeConfig'
import { IMessage } from './interfaces'
// import { runtimeConfig } from '../../configs/runtimeConfig'

// SDK-level options shared by all chat handlers in this module.
interface ISDKConfig {
    // When true, ask the model to return JSON output:
    // Ollama gets `format: 'json'`, OpenAI gets `response_format: { type: 'json_object' }`.
    outputJSON: boolean
}

/**
 * Streams a chat completion from a local model via the Ollama SDK.
 *
 * @param messages  Conversation history to send to the model.
 * @param config    Connection settings (base URL and model name).
 * @param sdkConfig Optional SDK tweaks; `outputJSON` requests JSON output.
 * @yields Each streamed content chunk as it arrives from the model.
 */
async function* ollamaChat(
    messages: IMessage[],
    config: IModelConfig,
    sdkConfig?: ISDKConfig
) {
    // Derive the config type from the constructor instead of `any`,
    // so typos in option names are caught at compile time.
    const conf: ConstructorParameters<typeof ChatOllama>[0] = {
        baseUrl: config.baseURL,
        model: config.model,
        streaming: true,
        // Only set `format` when JSON output was requested; leaving it
        // unset keeps the model's default free-text behavior.
        ...(sdkConfig?.outputJSON ? { format: 'json' as const } : {}),
    }
    const llm = new ChatOllama(conf)
    const response = await llm.stream(messages)
    for await (const part of response) {
        yield part.content
    }
}

/**
 * Streams a chat completion from an OpenAI-compatible endpoint via the
 * OpenAI SDK (works with any server exposing the OpenAI API shape,
 * selected through `config.baseURL`).
 *
 * @param messages  Conversation history to send to the model.
 * @param config    Credentials and connection settings (API key, base URL, model).
 * @param sdkConfig Optional SDK tweaks; `outputJSON` requests a JSON object response.
 * @yields Each streamed content chunk as it arrives from the model.
 */
async function* openAiChat(
    messages: IMessage[],
    config: IModelConfig,
    sdkConfig?: ISDKConfig
) {
    const client = new ChatOpenAI({
        apiKey: config.apiKey,
        model: config.model,
        streaming: true,
        configuration: {
            // Redirects the SDK to a non-default (OpenAI-compatible) server.
            baseURL: config.baseURL,
        },
    })

    const response = await client.stream(messages, {
        // `undefined` leaves the response format at the provider default.
        response_format: sdkConfig?.outputJSON
            ? { type: 'json_object' }
            : undefined,
    })

    for await (const part of response) {
        yield part.content
    }
}

/**
 * Lookup table from provider identifier to its streaming chat handler.
 * Both handlers share the same (messages, config, sdkConfig?) signature,
 * so callers can dispatch by provider name alone.
 */
export const chatHandlerMap = {
    ollama: ollamaChat,
    openai: openAiChat,
}
