import openAPIs from '@/api/index.js';
import aigcConfig, {
  AI_MODEL,
  ARK_V3_MODEL_ID,
  ModelSourceType,
  AI_MODEL_MODE,
  LLM_BOT_ID,
} from '@/config/index.ts';
import VERTC, {
  IRTCEngine,
  onUserJoinedEvent,
  onUserLeaveEvent,
  StreamRemoveReason,
  MediaType,
} from '@volcengine/rtc';

/**
 * @brief Event listeners keyed by RTC SDK event names.
 *
 * Keys are computed from `VERTC.events`, so the map stays aligned with the
 * SDK's own event-name constants; consumed by `RtcClient.addEventListeners`.
 */
export interface IEventListener {
  /** A user joined the room. */
  [VERTC.events.onUserJoined]: (e: onUserJoinedEvent) => void;
  /** A user left the room. */
  [VERTC.events.onUserLeave]: (e: onUserLeaveEvent) => void;
  /** A user published a media stream of the given type. */
  [VERTC.events.onUserPublishStream]: (e: { userId: string; mediaType: MediaType }) => void;
  /** A user stopped publishing a stream; `reason` explains the removal. */
  [VERTC.events.onUserUnpublishStream]: (e: { userId: string; mediaType: MediaType; reason: StreamRemoveReason }) => void;
  /** A user started audio capture. */
  [VERTC.events.onUserStartAudioCapture]: (e: { userId: string }) => void;
  /** A user stopped audio capture. */
  [VERTC.events.onUserStopAudioCapture]: (e: { userId: string }) => void;
  /** A binary message was received in the room (payload format depends on the sender — confirm against callers). */
  [VERTC.events.onRoomBinaryMessageReceived]: (e: { userId: string; message: ArrayBuffer }) => void;
}

/**
 * @brief Basic options used to create the engine and join a room.
 */
export interface BasicOptions {
  /** RTC application id, passed to `VERTC.createEngine`. */
  appId: string;
  /** Room token; optional here because `joinRoom` receives the token explicitly. */
  token?: string;
  /** Local user id; also used as the AI bot task id. */
  userId: string;
  /** Target room id. */
  roomId: string;
}

/**
 * @brief RTC client: wraps engine lifecycle, audio capture, room join/leave,
 *        and start/stop/interrupt control of the AI voice bot (via OpenAPI).
 */
export class RtcClient {

  /**
   * @brief RTC engine instance; created by `createEngine` before any other call.
   */
  engine!: IRTCEngine;
  /**
   * @brief Connection options captured in `createEngine` (appId / userId / roomId).
   */
  config!: BasicOptions;
  /**
   * @brief Whether the AI bot is currently running for this client.
   */
  audioBotEnabled = false;

  /**
   * @brief Initialize the engine and remember the connection options.
   */
  createEngine = (props: BasicOptions) => {
    this.config = props;
    this.engine = VERTC.createEngine(this.config.appId);
  };

  /**
   * @brief Register every handler in `events` on the engine.
   */
  addEventListeners = (events: IEventListener) => {
    // Object.entries pairs each event name with its handler directly,
    // avoiding a bare string index into the IEventListener type.
    Object.entries(events).forEach(([eventName, handler]) => {
      this.engine.on(eventName, handler);
    });
  };

  /**
   * @brief Enumerate the available audio capture devices.
   * @returns The full device entries, matching the declared return type.
   */
  async getDevices(): Promise<{
    audioInputs: MediaDeviceInfo[];
  }> {
    // Bug fix: the previous implementation mapped each entry to its
    // `deviceId`, returning string[] despite declaring MediaDeviceInfo[].
    const audioInputs = await VERTC.enumerateAudioCaptureDevices();
    return { audioInputs };
  }

  /**
   * @brief Start internal audio capture on the given microphone device.
   */
  startAudioCapture = async (mic: string) => {
    await this.engine.startAudioCapture(mic);
  };

  /**
   * @brief Stop internal audio capture.
   */
  stopAudioCapture = async () => {
    await this.engine.stopAudioCapture();
  };

  /**
   * @brief Publish the local stream of the given media type.
   */
  publishStream = (mediaType: MediaType) => {
    this.engine.publishStream(mediaType);
  };

  /**
   * @brief Stop publishing the local stream of the given media type.
   */
  unpublishStream = (mediaType: MediaType) => {
    this.engine.unpublishStream(mediaType);
  };

  /**
   * @brief Join the room configured in `createEngine`.
   * @param token Room token issued by the server.
   * @param username Display name; falls back to the user id when omitted/empty.
   */
  joinRoom = ({ token, username }: {
    token: string;
    username?: string;
  }): Promise<void> => {
    return this.engine.joinRoom(
      token,
      // roomId is a required string on BasicOptions — no assertion needed.
      this.config.roomId,
      {
        userId: this.config.userId,
        extraInfo: JSON.stringify({
          user_name: username || this.config.userId,
          user_id: this.config.userId,
        }),
      },
      {
        /** Automatically publish and subscribe audio after joining. */
        isAutoPublish: true,
        isAutoSubscribeAudio: true,
      }
    );
  };

  /**
   * @brief Leave the room, stopping the AI bot first.
   */
  leaveRoom = async () => {
    // Await the bot shutdown so the StopVoiceChat request is issued (and
    // `audioBotEnabled` reset) before the RTC session is torn down.
    await this.stopAudioBot(this.config.roomId, this.config.userId);
    this.engine.leaveRoom();
  };

  /**
   * @brief Start the AI bot through the StartVoiceChat OpenAPI.
   * @param roomId Room the bot joins.
   * @param userId Used as the task id, keying the bot to this user.
   * @param config Caller overrides for LLM/TTS settings.
   *               NOTE(review): left untyped as in the original; assumed shape
   *               is { LLMConfig?, TTSConfig? } — confirm against callers.
   */
  startAudioBot = async (
    roomId: string,
    userId: string,
    config,
  ) => {
    // Restart cleanly if a bot is already running for this task.
    if (this.audioBotEnabled) {
      await this.stopAudioBot(roomId, userId);
    }

    const modeSourceType = config.LLMConfig?.ModeSourceType;
    const originConfig = aigcConfig.getAIGCConfig().Config;
    // Merge caller overrides onto the base AIGC config. The explicit
    // `undefined` entries strip fields that must not be forwarded as-is;
    // ModeSourceType is consumed locally (to pick Mode below) and must not
    // leak into the request payload.
    const mergedConfigs = {
      ...originConfig,
      LLMConfig: {
        APIKey: undefined,
        Url: undefined,
        Feature: undefined,
        ...config.LLMConfig,
        ModeSourceType: undefined,
      },
      TTSConfig: {
        ...originConfig.TTSConfig,
        VoiceType: config.TTSConfig?.VoiceType,
        Cluster: config.TTSConfig?.Cluster,
      },
    };
    const model = config?.LLMConfig?.ModelName;

    // openAPIs wraps the server-side OpenAPI; see the StartVoiceChat
    // documentation for the full request schema.
    await openAPIs.StartVoiceChat({
      AppId: aigcConfig.AppId,
      BusinessId: aigcConfig.BusinessId,
      RoomId: roomId,
      TaskId: userId,
      Config: {
        ...mergedConfigs,
        LLMConfig: {
          ...mergedConfigs.LLMConfig,
          Mode:
            modeSourceType === ModelSourceType.Custom ? AI_MODEL_MODE.CUSTOM : AI_MODEL_MODE.ARK_V3,
          EndPointId: ARK_V3_MODEL_ID[model],
          BotId: (LLM_BOT_ID as Record<string, string>)[model],
        },
      },
    });

    this.audioBotEnabled = true;
  };

  /**
   * @brief Stop the AI bot through the StopVoiceChat OpenAPI.
   *        No-op when no bot is running.
   */
  stopAudioBot = async (roomId: string, userId: string) => {
    if (this.audioBotEnabled) {
      await openAPIs.StopVoiceChat({
        AppId: aigcConfig.AppId,
        BusinessId: aigcConfig.BusinessId,
        RoomId: roomId,
        TaskId: userId,
      });
      this.audioBotEnabled = false;
    }
  };

  /**
   * @brief Interrupt the bot while it is speaking.
   * @returns The UpdateVoiceChat response, or a rejected promise when no bot
   *          is running.
   */
  stopAudioVoice = async (roomId: string, userId: string) => {
    if (this.audioBotEnabled) {
      const res = await openAPIs.UpdateVoiceChat({
        AppId: aigcConfig.AppId,
        BusinessId: aigcConfig.BusinessId,
        RoomId: roomId,
        TaskId: userId,
        Command: 'interrupt',
      });
      return res;
    }
    return Promise.reject(new Error('AI 打断失败'));
  };
}

export default new RtcClient();