package llm

import (
    "fmt"

    llama "github.com/go-skynet/go-llama.cpp"
)

// Model wraps a loaded llama.cpp model handle and exposes a small
// prompt-in, token-stream-out API around it.
type Model struct {
    // llm is the underlying go-llama.cpp handle; nil if loading failed.
    llm *llama.LLama
}

// Load opens the model file at modelPath with a fixed 2048-token context
// window and 4 inference threads, and wraps the handle in a Model.
//
// Unlike the previous version, Load now reports a failure instead of
// returning a nil error alongside a Model holding a nil handle — that
// state would panic on the first Predict or Close call.
func Load(modelPath string) (*Model, error) {
    m := llama.LoadModel(modelPath, llama.Params{
        ContextSize: 2048,
        Threads:     4,
    })
    // NOTE(review): assumes LoadModel signals failure via a nil handle —
    // confirm against the go-llama.cpp API; some versions return an error.
    if m == nil {
        return nil, fmt.Errorf("loading model %q: llama.LoadModel returned nil", modelPath)
    }
    return &Model{llm: m}, nil
}

// Predict runs generation for prompt on the wrapped model and returns the
// backend's result channel. Output is capped at 128 tokens and delivered
// incrementally (Stream is enabled).
func (m *Model) Predict(prompt string) <-chan llama.PredictResult {
    opts := llama.PredictOptions{
        Prompt:    prompt,
        MaxTokens: 128,
        Stream:    true,
    }
    return m.llm.Predict(opts)
}

// Close releases the resources held by the underlying model handle.
// It is a no-op on a nil receiver or a Model whose handle was never
// set, so callers may defer it unconditionally right after Load.
func (m *Model) Close() {
    if m == nil || m.llm == nil {
        return
    }
    m.llm.Close()
}