import { MessageType, Assistant, ChatOptions } from '@/types'
import OpenAI from 'openai'

/** Builds a per-model token-limit record: completion cap + context window size. */
export const createModelConfig = (maxTokens: number, maxContextTokens: number) => {
  return { maxTokens, maxContextTokens }
}

// Token limits for each supported model.
// https://platform.openai.com/docs/models
export const modelConfigs = {
  'gpt-4o': createModelConfig(16384, 128000),
  'gpt-4o-mini': createModelConfig(16384, 128000),
  'o1-preview': createModelConfig(32768, 128000),
  'o1-mini': createModelConfig(65536, 128000),
  'gpt-4-turbo': createModelConfig(4096, 128000),
  'gpt-4': createModelConfig(8192, 8192),
}

/** Union of the supported model identifiers above. */
export type Model = keyof typeof modelConfigs

/** Model identifiers, alphabetically sorted (handy for pickers/menus). */
export const models = (Object.keys(modelConfigs) as Model[]).sort()

/**
 * Streaming chat-completion client backed by the OpenAI API, or any
 * OpenAI-compatible endpoint selected via `apiHost`.
 */
export default class OpenAIModel {
  public name = 'OpenAI'
  public options: Assistant & ChatOptions
  private client: OpenAI

  /**
   * @param options assistant/chat settings. `apiHost` falls back to the
   *   official endpoint when absent, empty, or whitespace-only.
   */
  constructor(options: Assistant & ChatOptions) {
    this.options = {
      ...options,
      // `||` (not `??`) on purpose: an empty/whitespace-only host must also fall back.
      apiHost: options.apiHost?.trim() || 'https://api.openai.com/v1',
    }
    this.client = new OpenAI({
      apiKey: this.options.apiKey,
      baseURL: this.options.apiHost,
      dangerouslyAllowBrowser: true,
      // organization: 'moss.tuzki.cc', // Setting an organization triggers CORS issues in the browser; prefer public access.
      // project: 'moss',
      defaultHeaders: { // Identify the request origin
        'HTTP-Referer': 'moss.tuzki.cc',
        'X-Title': 'Moss',
      },
    })
  }

  /**
   * Starts a streaming chat completion.
   * https://platform.openai.com/docs/api-reference/chat/create
   *
   * @param messages conversation history; only `role` and `content` are forwarded.
   * @returns the raw SSE stream, `deepChunk` to extract the text delta from a
   *   chunk, and `abort` to cancel the in-flight request.
   */
  async chat(messages: MessageType[]) {
    const {
      modelName,
      max_tokens_limit,
      max_completion_tokens,
      store,
      reasoning_effort,
      frequency_penalty,
      logit_bias,
      presence_penalty,
      temperature,
      top_p,
    } = this.options

    const stream = await this.client.chat.completions.create({
      messages: messages.map(({ role, content }) => ({ role, content })),
      model: modelName,
      stream: true,
      store: store ?? false,
      reasoning_effort,
      frequency_penalty,
      // Accept either an iterable of [token, bias] pairs (e.g. a Map) or an
      // already-plain record. The previous code returned undefined for a plain
      // record, silently dropping the caller's bias settings.
      // NOTE(review): assumes ChatOptions stores logit_bias as a Map or record — confirm.
      logit_bias: logit_bias
        ? (typeof logit_bias[Symbol.iterator] === 'function'
            ? Object.fromEntries(logit_bias)
            : logit_bias)
        : undefined,
      // Only forward the completion cap when the limit toggle is on; `|| undefined`
      // also drops 0/NaN so an unset field never becomes an invalid cap.
      max_completion_tokens: max_tokens_limit ? Number(max_completion_tokens) || undefined : undefined,
      presence_penalty,
      temperature,
      top_p,

    } as OpenAI.ChatCompletionCreateParamsStreaming)

    return {
      stream,
      // Text delta of the first choice; undefined for non-content chunks.
      deepChunk: (chunk: OpenAI.Chat.Completions.ChatCompletionChunk) => {
        return chunk.choices[0]?.delta?.content
      },
      // Cancels the underlying fetch via the SDK's AbortController.
      abort: () => {
        stream.controller.abort()
      },
    }
  }
}
