import { MessageType, Assistant, ChatOptions } from '@/types'
import { ChatResponse, Ollama } from 'ollama/browser'
import { createModelConfig } from './openai'

/** Token limits (max output, context window) for each supported Ollama model. */
export const modelConfigs = {
  'llama3.3': createModelConfig(16_384, 128_000),
  'llama3.2': createModelConfig(16_384, 128_000),
  'llama3.2:1b': createModelConfig(16_384, 128_000),
}

/** Union of the model identifiers declared in {@link modelConfigs}. */
export type Model = keyof typeof modelConfigs

/** All supported model names, in alphabetical order. */
export const models = (Object.keys(modelConfigs) as Model[]).sort()

/**
 * Chat-model adapter backed by a local (or remote) Ollama server.
 *
 * Wraps the `ollama/browser` client: the constructor resolves the API host
 * (defaulting to the standard local Ollama port) and `chat()` starts a
 * streaming completion.
 */
export default class OllamaModel {
  public name = 'Ollama'
  public options: Assistant & ChatOptions
  private client: Ollama

  constructor(options: Assistant & ChatOptions) {
    this.options = {
      ...options,
      // `||` (not `??`) is deliberate: a whitespace-only host trims to '' and
      // should also fall back to the default local server.
      apiHost: options.apiHost?.trim() || 'http://localhost:11434',
    }
    this.client = new Ollama({
      host: this.options.apiHost,
    })
  }

  /**
   * Starts a streaming chat completion for the given conversation.
   *
   * @param messages - Conversation history; only `role` and `content` are
   *   forwarded to the server.
   * @returns The raw response stream, a `deepChunk` helper that extracts the
   *   text delta from one streamed chunk, and an `abort` callback that cancels
   *   the in-flight request(s) on this client.
   */
  async chat(messages: MessageType[]) {
    // Exclude `apiHost` from the spread: it is connection configuration
    // already applied to the client in the constructor, not a chat option,
    // and must not leak into the request payload.
    const { modelName, apiHost: _apiHost, ...restOptions } = this.options

    const stream = await this.client.chat({
      messages: messages.map(({ role, content }) => ({ role, content })),
      model: modelName,
      stream: true,
      ...restOptions,
    })

    return {
      stream,
      deepChunk: (chunk: ChatResponse) => chunk.message.content,
      abort: () => {
        this.client.abort()
      },
    }
  }
}
