import Openai from "openai";
import { LLMChatMessage, AIAPI } from "../type/llmType.js";

/**
 * Wrapper around the OpenAI client that supports rotating through multiple
 * API keys (round-robin after every call, successful or not) and retrying
 * failed requests.
 * @export
 * @class OpenAI
 */
export class OpenAI implements AIAPI {
  private _client: Openai;
  private readonly _config: {
    model: string;
    temperature: number;
  };
  private readonly _apiKeys: Array<string>;
  private _apiKeyIndex: number = 0;
  private readonly _retries: number;

  /**
   * Create an LLM instance.
   *
   * @param config.baseURL API base URL (e.g. https://api.openai.com/v1)
   * @param config.apiKeys One or more API keys. Each entry may itself contain
   *   several keys separated by ASCII (`,`) or full-width (`，`) commas;
   *   entries are split, trimmed, and blank fragments dropped.
   * @param config.model Model name (e.g. "gpt-4o")
   * @param config.temperature Sampling temperature, defaults to 0.8
   * @param config.retries Number of retries on failure, defaults to 0
   * @throws Error if no usable API key remains after normalization
   * @memberof OpenAI
   */
  constructor(config: {
    baseURL: string;
    apiKeys: Array<string>;
    model: string;
    temperature?: number;
    retries?: number;
  }) {
    // Normalize first, then validate: a raw array of all-blank entries must
    // also be rejected, not just an empty array.
    const apiKeys = OpenAI._normalizeApiKeys(config.apiKeys);
    if (apiKeys.length === 0) {
      throw new Error("至少需要一个API Key");
    }
    this._apiKeys = apiKeys;
    this._retries = config.retries ?? 0;
    console.log(`初始化LLM模型 ${config.model}，共 ${this._apiKeys.length} 个API Key`);
    // Use a local default instead of mutating the caller's config object.
    this._config = {
      model: config.model,
      temperature: config.temperature ?? 0.8,
    };
    this._client = new Openai({ baseURL: config.baseURL, apiKey: this._apiKeys[0] });
  }

  /**
   * Split every raw entry on ASCII or full-width commas, trim each fragment,
   * and drop empty results. Entries without commas pass through trimmed
   * (splitting a comma-free string yields the string itself).
   */
  private static _normalizeApiKeys(rawKeys: Array<string>): Array<string> {
    return rawKeys.flatMap((key) =>
      key
        .split(/[,，]/)
        .map((k) => k.trim())
        .filter((k) => k.length > 0)
    );
  }

  // Advance to the next API key (round-robin) and install it on the client.
  private _rotateApiKey() {
    this._apiKeyIndex = (this._apiKeyIndex + 1) % this._apiKeys.length;
    this._client.apiKey = this._apiKeys[this._apiKeyIndex] as string;
    console.log(`模型 ${this._config.model} 切换到API Key索引: ${this._apiKeyIndex}`);
  }

  /**
   * Call the LLM to generate text. Retries up to `retries` times on failure,
   * rotating the API key after every attempt (including successful ones, to
   * spread load across keys).
   *
   * @param messages Conversation messages (system / user / assistant)
   * @param temperature Sampling temperature; defaults to the configured value
   * @returns The generated text content
   * @throws Error when all attempts fail or the model returns empty content
   * @memberof OpenAI
   */
  async chat(
    messages: Array<LLMChatMessage>,
    temperature: number = this._config.temperature
  ): Promise<string> {
    for (let attempt = 0; attempt <= this._retries; attempt++) {
      try {
        const request = { ...this._config, messages, temperature, stream: false };
        const response = await (this._client.chat.completions.create(
          request
        ) as Promise<Openai.Chat.Completions.ChatCompletion>);
        if (response.choices[0]?.message.content) {
          this._rotateApiKey();
          return response.choices[0].message.content;
        } else {
          // An empty completion counts as a failure and is retried.
          throw new Error(`模型 ${this._config.model} 返回内容为空`);
        }
      } catch (error) {
        // attempt is 0-based; log 1-based for human-readable counting.
        console.error(`模型 ${this._config.model} 调用失败，尝试第 ${attempt + 1} 次:`, error);
        this._rotateApiKey();
      }
    }
    const errorString = `模型 ${this._config.model} 调用失败，已达最大重试次数 ${this._retries}`;
    console.error(errorString);
    throw new Error(errorString);
  }
}
