import { ChromaClient, OllamaEmbeddingFunction } from 'chromadb'
import { ChatOllama, OllamaEmbeddings } from '@langchain/ollama'
import { IMessage } from './apis/chat/interfaces'
import { Chroma } from '@langchain/community/vectorstores/chroma'
import type { Document } from '@langchain/core/documents'

// Sample llama facts — apparently intended as the seed corpus for the
// commented-out embedding/retrieval experiment below (which embeds each
// entry with 'nomic-embed-text' and stores it in the 'docs' collection).
// NOTE(review): the dead code refers to `documents`, not `documentsArr`;
// confirm the intended name before re-enabling it.
const documentsArr = [
    "Llamas are members of the camelid family meaning they're pretty closely related to vicuñas and camels",
    'Llamas were first domesticated and used as pack animals 4,000 to 5,000 years ago in the Peruvian highlands',
    'Llamas can grow as much as 6 feet tall though the average llama between 5 feet 6 inches and 5 feet 9 inches tall',
    'Llamas weigh between 280 and 450 pounds and can carry 25 to 30 percent of their body weight',
    'Llamas are vegetarians and have very efficient digestive systems',
    'Llamas live to be about 20 years old, though some only live for 15 years and others live to be 30 years old',
]

// const test = async () => {
//     const collection = await client.createCollection({
//         name: 'docs',
//     })
//     for (let i = 0; i < documents.length; i++) {
//         const el = documents[i]
//         const emRes = await ollama.embed({
//             model: 'nomic-embed-text',
//             input: el,
//         })
//         await collection.add({
//             ids: [`${i}`],
//             embeddings: emRes.embeddings,
//             documents: [el],
//         })
//     }

//     const input = 'What animals are llamas related to?'

//     const response = await ollama.embed({
//         model: 'nomic-embed-text',
//         input,
//     })

//     const results = await collection.query({
//         queryEmbeddings: response.embeddings,
//         nResults: 1,
//     })
//     console.log(results.documents)
// }


/**
 * Connects to a local ChromaDB instance, looks up the existing 'docs'
 * collection, and prints its stored documents for inspection.
 *
 * Requires a running ChromaDB server (default endpoint) and a local
 * Ollama instance serving the 'nomic-embed-text' embedding model.
 */
const test2 = async () => {
    // List the ChromaDB collections (kept for debugging):
    // const collections = await client.listCollectionsAndMetadata()
    // console.log(collections)
    const embeddings = new OllamaEmbeddings({
        model: 'nomic-embed-text',
    })

    const client = new ChromaClient()

    const collection = await client.getCollection({
        name: 'docs',
        embeddingFunction: {
            // Wrap the call in an arrow function: passing the bare method
            // reference `embeddings.embedDocuments` would detach it from the
            // instance, so `this` would be undefined when Chroma invokes
            // `generate`.
            generate: (texts: string[]) => embeddings.embedDocuments(texts),
        },
    })

    // Fetch the stored documents (peek returns at most `limit` records).
    const documents = await collection.peek({
        limit: 9999,
    })

    console.log(documents)
}

// test2()

// Invokes a locally running large model through LangChain's Ollama integration.
/**
 * Streams a chat completion from a local Ollama model, logging each chunk
 * as it arrives and accumulating the full reply.
 *
 * Requires an Ollama server with the 'qwen2.5:1.5b' model available.
 *
 * @param messages - Conversation history; each item's `type` and `content`
 *                   are forwarded as a LangChain message-like object.
 * @returns The complete response text once the stream finishes (previously
 *          the streamed reply was only logged and then discarded).
 */
async function ollamaChatLangChain(messages: IMessage[]): Promise<string> {
    const model = new ChatOllama({
        model: 'qwen2.5:1.5b',
        // NOTE(review): verify ChatOllama actually honors `streaming` —
        // `.stream()` below streams regardless of this flag.
        streaming: true,
    })
    // Map project messages onto the { type, content } shape passed to LangChain.
    const ms = messages.map((v) => ({
        type: v.type,
        content: v.content,
    }))
    const stream = await model.stream(ms)

    let full = ''
    for await (const chunk of stream) {
        console.log(chunk.content)
        // chunk.content may be a string or structured content parts;
        // coerce non-strings explicitly instead of ending up with
        // '[object Object]'.
        full +=
            typeof chunk.content === 'string'
                ? chunk.content
                : JSON.stringify(chunk.content)
    }
    return full

    // const response = await ollama.chat({
    //     model: config.model,
    //     // model: 'qwen2.5:7b-instruct',
    //     messages,
    //     stream: true,
    // })
    // for await (const part of response) {
    //     console.log(part.message.content)
    //     yield part.message.content
    // }
}

// ollamaChatLangChain([
//     {
//         type: 'system',
//         content: 'You are a helpful assistant.',
//     },
//     {
//         type: 'user',
//         content: 'Hello!',
//     },
// ])

// test()

// ollama
//     .embeddings({
//         model: 'nomic-embed-text',
//         prompt: '快速排序是一个很快的排序算法',
//     })
//     .then((res) => {
//         console.log(res)
//     })
