/**
 * Ollama LLM Provider
 * see: https://github.com/ollama/ollama-js
 */
import type { Message } from 'ollama'
import type { LLMCompletionsData, LLMCompletionsCb, LLMAttachCheckRes } from '@/provider/llm/type'
import { llmBaseProvider } from '@/provider/llm/baseProvider'
// fixed: https://github.com/ollama/ollama-js/issues/189
import { Ollama } from 'ollama/browser'
import {
  geneTitlePrompt,
  clearContent,
  extractBase64Data,
  isBase64Image,
} from '@/provider/llm/hooks/utils'

/**
 * Concrete LLM provider backed by an Ollama server.
 * Streams chat completions, lists installed models with their
 * capabilities, and generates conversation titles via ollama.js.
 */
class OllamaProvider extends llmBaseProvider {
  // Shared ollama.js client; re-created on every init()
  private static client: Ollama

  /**
   * (Re)create the ollama.js client for the configured base URL.
   * A Bearer Authorization header is attached only when an API key is set.
   */
  protected init() {
    const apiKey = this.getApiKey()
    OllamaProvider.client = new Ollama({
      host: this.getBaseUrl(),
      // Only send Authorization when a key is actually configured
      ...(apiKey ? { headers: { Authorization: `Bearer ${apiKey}` } } : {}),
    })
  }

  /**
   * Probe the server with a cheap `list` call to decide availability.
   * @returns true when the API responded, false on any failure.
   */
  public async accessCheck() {
    try {
      await OllamaProvider.client.list()
      return true
    } catch {
      return false
    }
  }

  /**
   * Attachment capability for the given model: vision is allowed only
   * when the model reports it, with a 20 MiB per-image size cap.
   */
  public attachCheck(model: LLMModel): LLMAttachCheckRes {
    return {
      allowVision: !!model.vision,
      visionLimit: 20 * 1024 * 1024,
    }
  }

  /**
   * List available models. When models were configured explicitly,
   * skip the server round-trip and return those instead.
   * Per-model capability lookups (`show`) run concurrently.
   */
  public async getModels() {
    // If models are preset, do not query the server
    if (this.getDefaultModels().length) {
      return this.getDefaultModels()
    }
    const { models } = await OllamaProvider.client.list()
    // Fetch capabilities in parallel instead of one request at a time;
    // Promise.all preserves the original model order.
    const response: LLMModel[] = await Promise.all(
      models.map(async (item) => {
        const info = await OllamaProvider.client.show({
          model: item.name,
        })
        return {
          key: item.name,
          type: info.capabilities.includes('embedding') ? 'embedding' : 'llm',
          name: item.name,
          size: item.size,
          toolUse: info.capabilities.includes('tools'),
          vision: info.capabilities.includes('vision'),
        }
      }),
    )
    return response
  }

  /**
   * Generate a short conversation title from the message history.
   * Sampling options are tuned for concise, stable titles.
   * @returns the cleaned title, or '' on failure.
   */
  public async geneTitle(model: string, messages: LLMMessage[]) {
    try {
      const result = await OllamaProvider.client.generate({
        model,
        prompt: geneTitlePrompt(messages),
        options: {
          temperature: 0.3,
          top_p: 0.5,
          num_predict: 100,
          frequency_penalty: 0.7, // discourage repeated wording
          presence_penalty: 0.3, // keep core terminology stable
        },
      })

      // Strip any markup tags and newlines from the generated title
      return result.response
        .replace(/<[^>]*>/g, '')
        .replace(/\n/g, '')
        .trim()
    } catch (error) {
      window.console.error(error)
      return ''
    }
  }

  /**
   * Stream a chat completion. `callback.onUpdate` receives the
   * accumulated text for every streamed chunk; `onSuccess` fires with
   * the final text (also when the stream is interrupted after partial
   * output); `onError` fires only when nothing was produced.
   */
  public async completions(data: LLMCompletionsData, callback: LLMCompletionsCb) {
    let content = ''
    const messages: Message[] = []
    for (const item of data.messages) {
      // 'error' pseudo-messages are local UI state, not sent to the model
      if (item.role !== 'error') {
        const line: Message = {
          role: item.role,
          content: clearContent(item.content),
        }
        // Handle image attachments.
        // Images are submitted inline as base64 for demo purposes only;
        // in production, upload to a storage service instead.
        if (item.attachs?.length) {
          const images: string[] = []
          for (const attach of item.attachs) {
            if (isBase64Image(attach.url)) {
              images.push(extractBase64Data(attach.url))
            }
          }
          line.images = images
        }
        messages.push(line)
      }
    }
    try {
      const prediction = await OllamaProvider.client.chat({
        model: data.model,
        messages,
        options: {
          temperature: data.temperature,
          num_predict: data.max_tokens || undefined,
        },
        stream: true,
      })
      for await (const part of prediction) {
        content += part.message.content
        callback.onUpdate(content)
      }
      callback.onSuccess(content)
    } catch (error) {
      window.console.error(error)
      if (content) {
        // Stream was interrupted: treat partial output as a success
        callback.onSuccess(content)
      } else {
        callback.onError(error)
      }
    }
  }

  /** Abort all in-flight streamed requests on the shared client. */
  public async abort() {
    OllamaProvider.client.abort()
  }
}

export default OllamaProvider
