import { MessageType, Assistant, ChatOptions } from '@/types'
import { GoogleGenerativeAI } from '@google/generative-ai'

/**
 * Builds a per-model token-limit record.
 *
 * @param maxTokens - maximum tokens the model may generate in one response
 * @param maxContextTokens - maximum tokens allowed in the request context window
 * @returns an object carrying both limits, keyed by the same names
 */
export const createModelConfig = (maxTokens: number, maxContextTokens: number) => {
  return { maxTokens, maxContextTokens }
}

// https://ai.google.dev/gemini-api/docs/models
// Token limits for each supported Gemini model.
export const modelConfigs = {
  'gemini-2.0-flash-exp': createModelConfig(16_384, 128_000),
  'gemini-1.5-flash': createModelConfig(16_384, 128_000),
}

// Union of the supported model identifiers above.
export type Model = keyof typeof modelConfigs

// All supported model names, in alphabetical order.
export const models: Model[] = (Object.keys(modelConfigs) as Model[]).sort()

/**
 * Chat-model adapter for Google's Gemini API, backed by the
 * `@google/generative-ai` SDK.
 */
export default class GeminiModel {
  public name = 'Gemini'
  public options: Assistant & ChatOptions
  private client: GoogleGenerativeAI

  /**
   * @param options assistant settings plus chat options; `apiHost` falls back
   *   to Google's public endpoint when blank.
   */
  constructor(options: Assistant & ChatOptions) {
    this.options = {
      ...options,
      // NOTE(review): apiHost is normalized here but never handed to the SDK
      // client below — confirm whether custom hosts are actually supported.
      apiHost: options.apiHost?.trim() || 'https://generativelanguage.googleapis.com/v1beta/models/',
    }
    this.client = new GoogleGenerativeAI(this.options.apiKey)
  }

  /**
   * Streams a completion for the given conversation.
   *
   * The last message is sent as the new prompt; everything before it becomes
   * chat history. Gemini's history format excludes `system` messages and uses
   * the role `model` where we use `assistant`.
   *
   * @param messages full conversation, ending with the message to answer
   * @returns `stream` (SDK response stream), `deepChunk` (extracts text from a
   *   stream chunk), and `abort` (currently a no-op — cancellation unsupported)
   * @throws Error when `messages` is empty
   */
  async chat(messages: MessageType[]) {
    const last = messages[messages.length - 1]
    if (!last) {
      throw new Error('chat() requires at least one message')
    }

    const model = this.client.getGenerativeModel({ model: this.options.modelName || 'gemini-2.0-flash-exp' })

    // Drop system messages, translate roles, and exclude only the final
    // message (it is re-sent via sendMessageStream). The previous code used
    // slice(0, -2), which also silently discarded the prior assistant turn.
    const history = messages
      .filter((m) => m.role && m.role !== 'system')
      .map((m) => ({
        role: m.role === 'assistant' ? 'model' : m.role,
        parts: [{ text: m.content }],
      }))
      .slice(0, -1)

    const chat = model.startChat({ history })

    // https://ai.google.dev/gemini-api/docs/text-generation#chat
    const { stream } = await chat.sendMessageStream(last.content)

    return {
      stream,
      // The SDK exposes the chunk type internally; structurally it is an
      // object whose text() yields the chunk's text (was mistyped as void).
      deepChunk: (chunk: { text: () => string }) => {
        return chunk.text()
      },
      abort: () => {
        // Cancellation is not supported yet.
      },
    }
  }
}
