/**
 * LM Studio LLM Provider
 * view: https://github.com/lmstudio-ai/lmstudio-js
 */
import type { LLMCompletionsData, LLMCompletionsCb, LLMMessage, LLMModel } from '@/provider/llm/type'
import { llmBaseProvider } from '@/provider/llm/baseProvider'
import { Chat, LMStudioClient } from '@lmstudio/sdk'
import { geneTitlePrompt, clearContent, extractBase64Data } from '@/provider/llm/hooks/utils'

/**
 * Provider backed by a local LM Studio server, accessed through the
 * official `@lmstudio/sdk` client.
 */
class LMStudioProvider extends llmBaseProvider {
  // Set to true by abort() to interrupt an in-flight streamed completion.
  private isBreak = false
  // Shared SDK client.
  // NOTE(review): static — the most recently init()-ed instance's baseUrl
  // wins for ALL instances; confirm only a single provider instance exists.
  private static client: LMStudioClient

  protected init() {
    LMStudioProvider.client = new LMStudioClient({
      baseUrl: this.getBaseUrl(),
    })
  }

  /**
   * Probe the LM Studio server by requesting its version.
   * @returns true when the API is reachable, false otherwise.
   */
  public async check() {
    try {
      await LMStudioProvider.client.system.getLMStudioVersion()
      return true
    } catch {
      return false
    }
  }

  /**
   * List all models downloaded in LM Studio, mapped to the app's
   * LLMModel shape.
   */
  public async getModels() {
    const response: LLMModel[] = []
    const models = await LMStudioProvider.client.system.listDownloadedModels()
    for (const item of models) {
      const line: LLMModel = {
        key: item.modelKey,
        type: item.type,
        name: item.displayName,
        size: item.sizeBytes,
      }
      // Capability flags are only reported for LLM-type models.
      if (item.type === 'llm') {
        line.toolUse = item.trainedForToolUse
        line.vision = item.vision
      }
      response.push(line)
    }
    return response
  }

  /**
   * Generate a short conversation title from the given messages.
   * Note: this is experimental; generated titles may be inaccurate.
   * @returns the title text, or '' on failure.
   */
  public async geneTitle(model: string, messages: LLMMessage[]) {
    try {
      const chat = geneTitlePrompt(messages)
      const loadModel = await LMStudioProvider.client.llm.model(model)
      const result = await loadModel.respond(chat, {
        temperature: 0.3,
        topPSampling: 0.5,
        maxTokens: 100,
        // BUG FIX: was 0.7 — values below 1.0 *encourage* repetition
        // (1.0 disables the penalty). A value > 1.0 is required to
        // discourage repeated words, which is the stated intent.
        repeatPenalty: 1.1,
      })

      return result.nonReasoningContent
    } catch (error) {
      window.console.error(error)
      return ''
    }
  }

  /**
   * Stream a chat completion, reporting incremental progress through the
   * callback. Can be interrupted mid-stream via abort().
   */
  public async completions(data: LLMCompletionsData, callback: LLMCompletionsCb) {
    try {
      this.isBreak = false
      let current = ''
      const chat = Chat.empty()
      for (const item of data.messages) {
        // Locally generated error entries are not part of the conversation.
        if (item.role !== 'error') {
          // Typed via indexed access instead of any[] — holds the file
          // handles returned by prepareImageBase64.
          const images: Awaited<
            ReturnType<LMStudioClient['files']['prepareImageBase64']>
          >[] = []
          // Upload base64 image attachments so the chat can reference them.
          // This handling is rough and will be refined in a later version.
          if (item.attachs?.length) {
            for (const attach of item.attachs) {
              const image = extractBase64Data(attach.url, true)
              if (image) {
                images.push(
                  await LMStudioProvider.client.files.prepareImageBase64(attach.name, image),
                )
              }
            }
          }
          chat.append(item.role, clearContent(item.content), { images })
        }
      }
      const loadModel = await LMStudioProvider.client.llm.model(data.model)
      const prediction = loadModel.respond(chat, {
        temperature: data.temperature,
        // false = no token limit (LM Studio SDK convention).
        maxTokens: data.max_tokens || false,
      })
      for await (const { content } of prediction) {
        current += content
        callback.onUpdate(current)
        if (this.isBreak) {
          // Interrupt the streamed output.
          await prediction.cancel()
          break
        }
      }
      callback.onSuccess(current)
    } catch (error) {
      window.console.error(error)
      callback.onError(error)
    }
  }

  /** Request interruption of the current streamed completion. */
  public async abort() {
    this.isBreak = true
  }
}

export default LMStudioProvider
