import { Chroma } from '@langchain/community/vectorstores/chroma'
import { OllamaEmbeddings } from '@langchain/ollama'
import { IModelConfig, runtimeConfig } from '../../configs/runtimeConfig'
import BigModelConfig from '../../models/BigModelConfig'
import KnowledgeBase from '../../models/KnowledgeBase'

// Cache of collectionName -> embedding-model config so repeated calls skip the
// database round-trip. NOTE(review): entries are never invalidated, so a config
// edited in the DB is not picked up until process restart — confirm acceptable.
export const collectionNameModelConfigCache = new Map<string, IModelConfig>()

/**
 * Resolves the embedding-model configuration for a knowledge-base collection
 * and builds an OllamaEmbeddings client from it.
 *
 * @param collectionName - Chroma collection name identifying the knowledge base.
 * @returns An OllamaEmbeddings instance configured with the model's baseURL and model name.
 * @throws Error when no knowledge base with this collection name exists, or it
 *         has no linked embed model.
 */
export const getEmbeddings = async (collectionName: string) => {
    let modelConfig = collectionNameModelConfigCache.get(collectionName)

    if (!modelConfig) {
        // Cache miss: load the knowledge base together with its embed model.
        const kbItem = await KnowledgeBase.findOne({
            where: { collectionName },
            include: {
                model: BigModelConfig,
                as: 'embedModel'
            }
        })
        if (!kbItem || !kbItem.embedModel) {
            throw new Error('知识库不存在')
        }
        // toJSON() on an existing model instance always yields an object, so no
        // further null check is needed here.
        modelConfig = kbItem.embedModel.toJSON()
        collectionNameModelConfigCache.set(collectionName, modelConfig)
    }

    return new OllamaEmbeddings({
        baseUrl: modelConfig.baseURL,
        model: modelConfig.model,
    })
}

/**
 * Creates a Chroma vector store bound to the given collection, wired to the
 * embedding model configured for that knowledge base and to the Chroma server
 * URL from the runtime configuration.
 *
 * @param collectionName - Chroma collection name identifying the knowledge base.
 * @returns A Chroma vector store ready for similarity search / ingestion.
 */
export const getVectorStore = async (collectionName: string) => {
    const embeddings = await getEmbeddings(collectionName)
    return new Chroma(embeddings, {
        collectionName,
        url: runtimeConfig.urlConfigs.chromaUrl,
    })
}
