/** biome-ignore-all lint/suspicious/noExplicitAny: ignore all */
import { isSupportedModel } from "@main/utils/models";
import logger from "@shared/logger/main-logger";
import type {
  CreateModelData,
  Message,
  Model,
  Provider,
  ThreadFormChatParameters,
  UpdateProviderData,
} from "@shared/triplit/types";
import type { OpenAITools } from "@shared/types/openai";
import { isNull } from "es-toolkit";
import type OpenAI from "openai";
import type { Stream } from "openai/streaming";

/**
 * Streaming response produced by the OpenAI SDK when `stream: true` is
 * passed to `chat.completions.create`; an async-iterable of
 * `ChatCompletionChunk`s. Re-exported so providers share one alias
 * instead of repeating the SDK generic.
 */
export type OpenAIStreamResponse =
  Stream<OpenAI.Chat.Completions.ChatCompletionChunk>;

/** Message shape accepted by the OpenAI chat-completions endpoint. */
export type ModelMessage = OpenAI.Chat.Completions.ChatCompletionMessageParam;

import {
  cleanupAbortController as cleanupAbortControllerForTab,
  createAbortController as createAbortControllerForTab,
} from "./stream-manager";

/**
 * Application-level chat message passed into the provider layer.
 * Superset of the OpenAI message roles; converted to `ModelMessage`
 * before being sent to the SDK (see `_startStreamChat`).
 */
export interface ChatMessage {
  role: "user" | "assistant" | "system" | "function" | "tool";
  content: string;
  // When present, sent to the model instead of `content` (see `_startStreamChat`).
  renderedContent?: string;
  id: string; // Add message ID to support attachment lookup
  tool_call_id?: string; // For tool result messages
}

/**
 * Parameters for starting a streaming chat completion.
 */
export interface StreamChatParams {
  // Tab that owns the stream; used as the AbortController key.
  tabId: string;
  threadId: string;
  userMessageId: string;
  // Conversation history, oldest first — assumed; TODO confirm with callers.
  messages: ChatMessage[];
  model: Model;
  provider: Provider;
  // Optional MCP tool definitions forwarded to the completion request.
  tools?: OpenAITools;
  regeneration?: boolean;

  // Sampling parameters (temperature, topP, maxTokens, penalties).
  chatParameters: ThreadFormChatParameters;
}

/**
 * Abstract base class for LLM provider integrations.
 *
 * Concrete providers implement API-key validation, model listing, title
 * summarization, and `startStreamChat`; the shared OpenAI-compatible
 * streaming path is provided here via `_startStreamChat`.
 */
export abstract class BaseProviderService {
  protected provider: Provider;
  /** Last successfully fetched list of supported models (cache). */
  protected models: CreateModelData[] = [];

  constructor(provider: Provider) {
    this.provider = provider;
  }

  // Helper to create and store an AbortController keyed by tab
  protected createAbortController(tabId: string): AbortController {
    return createAbortControllerForTab(tabId);
  }

  // Helper to clean up the AbortController after the stream is done
  protected cleanupAbortController(tabId: string) {
    cleanupAbortControllerForTab(tabId);
  }

  /** Verify the provider credentials; never throws, reports via `isOk`. */
  abstract checkApiKey(): Promise<{
    isOk: boolean;
    errorMsg: string | null;
  }>;

  abstract updateProvider(updateData: UpdateProviderData): void;

  /**
   * Fetch the provider's model list, keep only supported models, and
   * cache the result on `this.models`.
   *
   * @returns The supported models, or `[]` on failure. On failure the
   *   previously cached `this.models` is intentionally left untouched so
   *   callers can still use stale data.
   */
  async fetchModels(): Promise<CreateModelData[]> {
    try {
      const models = await this.fetchProviderModels();
      const supportedModels = models.filter((model) => isSupportedModel(model));
      this.models = supportedModels;
      logger.debug("Fetch models successfully", {
        providerName: this.provider.name,
        supportedModelsCount: supportedModels.length,
      });

      return supportedModels;
    } catch (error) {
      logger.error("Failed to fetch models:", { error });
      // NOTE: `this.models` is always an array (initialized to []), so no
      // reset is needed here; the stale cache is deliberately preserved.
      return [];
    }
  }

  /** Provider-specific raw model listing (before supported-model filtering). */
  protected abstract fetchProviderModels(): Promise<CreateModelData[]>;

  abstract startStreamChat(
    params: StreamChatParams,
    abortController: AbortController,
  ): Promise<OpenAIStreamResponse>;

  /**
   * Shared OpenAI-compatible streaming implementation.
   *
   * Builds a streaming chat-completion request from `params`, omitting any
   * sampling parameter that is null, attaches optional MCP tools, and wires
   * the AbortController signal through to the SDK call.
   *
   * @throws Re-throws any error from the OpenAI client after logging it.
   */
  protected async _startStreamChat(
    params: StreamChatParams,
    abortController: AbortController,
    openaiClient: OpenAI,
  ): Promise<OpenAIStreamResponse> {
    const { tabId, threadId, messages, model, tools, chatParameters } = params;
    const {
      temperature = null,
      topP = null,
      maxTokens = null,
      presencePenalty = null,
      frequencyPenalty = null,
    } = chatParameters ?? {};

    logger.debug("Chat parameters", {
      temperature,
      topP,
      maxTokens,
      presencePenalty,
      frequencyPenalty,
    });

    try {
      logger.info(`Starting stream chat for tab ${tabId}, thread ${threadId}`, {
        hasTools: !!tools && tools.length > 0,
        toolCount: tools?.length || 0,
      });

      // Prefer renderedContent (e.g. with attachments expanded) when present.
      const modelMessages = messages.map((msg) => ({
        ...msg,
        content: msg.renderedContent ?? msg.content,
      }));

      const chatCompletionParams: OpenAI.Chat.Completions.ChatCompletionCreateParamsStreaming =
        {
          model: model.name,
          messages: modelMessages as ModelMessage[],
          stream: true,

          // * Chat Parameters — spread only the ones explicitly set,
          // so the provider's own defaults apply otherwise.

          ...(!isNull(temperature) && { temperature }),
          ...(!isNull(topP) && { top_p: topP }),
          ...(!isNull(maxTokens) && { max_tokens: maxTokens }),
          ...(!isNull(presencePenalty) && {
            presence_penalty: presencePenalty,
          }),
          ...(!isNull(frequencyPenalty) && {
            frequency_penalty: frequencyPenalty,
          }),
        };

      // Add tools if provided
      if (tools && tools.length > 0) {
        chatCompletionParams.tools = tools;
        logger.debug("Adding MCP tools to chat completion", {
          toolNames: tools.map((t) => t.function.name),
        });
      }

      const stream = await openaiClient.chat.completions.create(
        chatCompletionParams,
        {
          signal: abortController.signal,
        },
      );

      return stream;
    } catch (error) {
      logger.error("Failed to start stream chat:", { error });
      throw error;
    }
  }

  /** Generate a short title for a thread from its messages. */
  abstract summaryTitle(
    messages: Message[],
    modelName: string,
  ): Promise<{
    isOk: boolean;
    title: string;
    errorMsg: string | null;
  }>;
}

/**
 * Extended interface for providers that support MCP-specific features.
 *
 * Implementations provide a dedicated streaming entry point used when MCP
 * tools are attached to the request.
 */
export interface McpSupportedProvider {
  startStreamChatForMcp(
    params: StreamChatParams,
    abortController: AbortController,
  ): Promise<OpenAIStreamResponse>;
}
