import { getMcpReflectionSystemPrompt } from "@main/prompts/mcp-reflection-system";
import {
  CommunicationWay,
  ServiceHandler,
  ServiceRegister,
} from "@main/shared/reflect";
import { TYPES } from "@main/shared/types";
import { extractErrorMessage } from "@main/utils/error-utils";
import { convertMessagesToModelMessages } from "@main/utils/message-converter";
import {
  DEFAULT_SMOOTHER_CONFIG,
  StreamSmoother,
  type StreamSmootherConfig,
} from "@main/utils/stream-smoother";
import logger from "@shared/logger/main-logger";
import type {
  CreateModelData,
  Message,
  Model,
  Provider,
  UpdateProviderData,
} from "@shared/triplit/types";
import type {
  OpenAIToolCall,
  OpenAITools,
  ToolCallResult,
} from "@shared/types/openai";
import { inject, injectable } from "inversify";
import { nanoid } from "nanoid";
import type OpenAI from "openai";
import type { Stream } from "openai/streaming";
import type { ChatService } from "../chat-service";
import type { ConfigDbService } from "../db-service/config-db-service";
import type { ThreadDbService } from "../db-service/thread-db-service";
import {
  EventNames,
  emitter,
  sendToMain,
  sendToRenderer,
  sendToThread,
} from "../event-service";
import type { McpService } from "../mcp-service";
import type { McpToolExecutor } from "../mcp-tool-executor";
import type { MessageService } from "../message-service";
import type { ModelService } from "../model-service";
import type { SequentialContentManager } from "../sequential-content-manager";
import type { SettingsService } from "../settings-service";
import type { ThreadFormsService } from "../thread-forms-service";
import { AI302ProviderService } from "./302AI-provider-service/302AI-provider-service";
import type {
  BaseProviderService,
  ChatMessage,
  McpSupportedProvider,
  OpenAIStreamResponse,
  StreamChatParams,
} from "./base-provider-service";
import { OpenAIProviderService } from "./openAI-provider-service";
import {
  abortStream,
  cleanupAbortController,
  createAbortController,
} from "./stream-manager";

@ServiceRegister(TYPES.ProviderService)
@injectable()
export class ProviderService {
  // Provider records keyed by provider id, kept in sync with the DB via
  // PROVIDER_ADD / PROVIDER_DELETE / PROVIDER_UPDATE events.
  private providerMap: Map<string, Provider> = new Map(); // * Cache provider to find provider by id
  // Lazily-built service instances for enabled providers only.
  private providerInstMap: Map<string, BaseProviderService> = new Map(); // * Cache provider instances to avoid duplicate creation
  // Per-thread MCP server selection captured from MESSAGE_SEND_FROM_USER;
  // entries are expired ~30s after being set (see setupMessageEventListeners).
  private messageEventDataMap: Map<
    string,
    { selectedMcpServerIds?: string[] }
  > = new Map(); // * Cache message event data for threads

  constructor(
    @inject(TYPES.ChatService) private chatService: ChatService,
    @inject(TYPES.MessageService) private messageService: MessageService,
    @inject(TYPES.SettingsService) private settingsService: SettingsService,
    @inject(TYPES.ConfigDbService) private configDbService: ConfigDbService,
    @inject(TYPES.McpService) private mcpService: McpService,
    @inject(TYPES.McpToolExecutor) private mcpToolExecutor: McpToolExecutor,
    @inject(TYPES.SequentialContentManager)
    private sequentialContentManager: SequentialContentManager,
    @inject(TYPES.ThreadDbService) private threadDbService: ThreadDbService,
    @inject(TYPES.ModelService) private modelService: ModelService,
    @inject(TYPES.ThreadFormsService)
    private threadFormsService: ThreadFormsService,
  ) {
    this.setupEventListeners();
    // initProviders is async and cannot be awaited in a constructor; run it
    // fire-and-forget but attach a rejection handler so a DB failure (e.g.
    // getProviders rejecting) cannot surface as an unhandled promise rejection.
    this.initProviders().catch((error) => {
      logger.error("Failed to initialize providers", { error });
    });
    this.setupMessageEventListeners();
  }

  /**
   * Load every provider from the config database into the in-memory cache
   * and eagerly build service instances for the enabled ones. Per-provider
   * instantiation failures are logged and do not stop the loop.
   */
  private async initProviders() {
    const providers = await this.configDbService.getProviders();
    for (const provider of providers) {
      this.providerMap.set(provider.id, provider);
      if (!provider.enabled) {
        continue;
      }
      try {
        logger.info("init provider", {
          providerId: provider.id,
          providerName: provider.name,
          apiType: provider.apiType,
        });
        const inst = this.createProviderInst(provider);
        if (inst) {
          this.providerInstMap.set(provider.id, inst);
        }
      } catch (error) {
        logger.error("Failed to init provider", {
          providerId: provider.id,
          error,
        });
      }
    }
  }

  /**
   * Subscribe to provider lifecycle events (add/delete/update) and to the
   * MCP tool-call progress events that drive sequential content updates.
   */
  private setupEventListeners() {
    emitter.on(EventNames.PROVIDER_ADD, ({ provider }) =>
      this.handleProviderAdded(provider),
    );
    emitter.on(EventNames.PROVIDER_DELETE, ({ providerId }) =>
      this.handleProviderDeleted(providerId),
    );
    emitter.on(EventNames.PROVIDER_UPDATE, ({ providerId, updateData }) =>
      this.handleProviderUpdated(providerId, updateData),
    );

    // MCP tool call event listeners for sequential content updates
    emitter.on(EventNames.TOOL_CALL_START, (payload) => {
      this.handleToolCallStart(
        payload.threadId,
        payload.messageId,
        payload.toolCallId,
      );
    });
    emitter.on(EventNames.TOOL_CALL_SUCCESS, (payload) => {
      this.handleToolCallSuccess(
        payload.threadId,
        payload.messageId,
        payload.toolCallId,
        payload.result,
        payload.executionTime,
      );
    });
    emitter.on(EventNames.TOOL_CALL_ERROR, (payload) => {
      this.handleToolCallError(
        payload.threadId,
        payload.messageId,
        payload.toolCallId,
        payload.error,
      );
    });
  }

  /**
   * Cache the MCP server selection that accompanies each user message so
   * the subsequent stream chat can look it up by thread id.
   *
   * Bug fix: the 30s expiry timer previously deleted whatever entry was in
   * the map for the thread, so a stale timer scheduled by an earlier message
   * could evict the data of a newer message on the same thread. The timer
   * now only removes the exact entry it was scheduled for.
   */
  private setupMessageEventListeners() {
    emitter.on(
      EventNames.MESSAGE_SEND_FROM_USER,
      ({ threadId, selectedMcpServerIds }) => {
        // Cache the MCP server IDs for this thread
        if (selectedMcpServerIds && selectedMcpServerIds.length > 0) {
          const entry = { selectedMcpServerIds };
          this.messageEventDataMap.set(threadId, entry);

          // Clean up after 30 seconds to prevent memory leaks — but only if
          // this exact entry is still current (a newer message may have
          // replaced it in the meantime).
          setTimeout(() => {
            if (this.messageEventDataMap.get(threadId) === entry) {
              this.messageEventDataMap.delete(threadId);
            }
          }, 30000);
        }
      },
    );
  }

  /** Look up the cached per-thread message event data, if it has not expired. */
  private getLatestMessageEventData(
    threadId: string,
  ): { selectedMcpServerIds?: string[] } | undefined {
    const cached = this.messageEventDataMap.get(threadId);
    return cached;
  }

  /**
   * PROVIDER_ADD handler: store the provider record in the cache and, when
   * it is enabled, eagerly create and cache its service instance.
   * Failures are logged rather than thrown (event-handler context).
   */
  private handleProviderAdded(provider: Provider) {
    const { id: providerId, name: providerName } = provider;
    try {
      logger.info("Adding provider to cache", { providerId, providerName });

      this.providerMap.set(providerId, provider);

      if (provider.enabled) {
        const inst = this.createProviderInst(provider);
        if (inst) {
          this.providerInstMap.set(providerId, inst);
          logger.info("Provider instance created and cached", {
            providerName,
          });
        }
      }

      logger.info("Provider added to cache successfully", { providerName });
    } catch (error) {
      logger.error("Failed to add provider to cache", { providerId, error });
    }
  }

  /**
   * PROVIDER_DELETE handler: drop the provider record and any cached
   * service instance. Logs verbosely because stale cache state has caused
   * hard-to-debug "provider not found" issues (see getProviderById).
   */
  private handleProviderDeleted(providerId: string) {
    try {
      const cached = this.providerMap.get(providerId);
      const providerName = cached?.name || providerId;

      logger.warn("Provider deletion event received", {
        providerId,
        providerName,
        providerExists: !!cached,
        totalCachedProviders: this.providerMap.size,
        allProviderIds: Array.from(this.providerMap.keys()),
      });

      if (!cached) {
        logger.warn("Attempted to delete non-existent provider from cache", {
          providerId,
          availableProviders: Array.from(this.providerMap.keys()),
        });
        return;
      }

      logger.info("Removing provider from cache", {
        providerId,
        providerName,
      });

      this.providerMap.delete(providerId);
      this.providerInstMap.delete(providerId);

      logger.info("Provider removed from cache successfully", {
        providerName,
        remainingProviders: this.providerMap.size,
      });
    } catch (error) {
      logger.error("Failed to remove provider from cache", {
        providerId,
        error,
      });
    }
  }

  /**
   * PROVIDER_UPDATE handler: merge the update into the cached record and
   * propagate it to any live service instance.
   *
   * When the update disables the provider, the cached instance is evicted
   * as well — otherwise a disabled provider would keep serving requests,
   * which is inconsistent with getProviderInst refusing to (re)create
   * instances for disabled providers.
   */
  private handleProviderUpdated(
    providerId: string,
    updateData: UpdateProviderData,
  ) {
    const provider = this.providerMap.get(providerId);
    if (!provider) {
      return;
    }

    this.providerMap.set(providerId, {
      ...provider,
      ...updateData,
    });

    if (updateData.enabled === false) {
      this.providerInstMap.delete(providerId);
      return;
    }

    const providerInst = this.providerInstMap.get(providerId);
    if (providerInst) {
      providerInst.updateProvider(updateData);
    }
  }

  /**
   * Factory mapping a provider's apiType to its concrete service class.
   * Returns undefined (after logging a warning) for unsupported types.
   */
  private createProviderInst(
    provider: Provider,
  ): BaseProviderService | undefined {
    const { apiType } = provider;
    if (apiType === "openai") {
      return new OpenAIProviderService(provider);
    }
    if (apiType === "302ai") {
      return new AI302ProviderService(provider, this.settingsService);
    }
    logger.warn("Unknown provider type", { apiType });
    return undefined;
  }

  private async getProviderInst(
    providerId: string,
  ): Promise<BaseProviderService> {
    let providerInst = this.providerInstMap.get(providerId);

    if (!providerInst) {
      logger.info("Creating new provider instance", { providerId });

      const provider = await this.getProviderById(providerId);
      logger.info("Provider found for instance creation", {
        providerId,
        providerName: provider.name,
        providerApiType: provider.apiType,
        providerEnabled: provider.enabled,
      });

      if (!provider.enabled) {
        throw new Error(
          `Cannot create instance for disabled provider: ${provider.name}`,
        );
      }

      providerInst = this.createProviderInst(provider);

      if (!providerInst) {
        throw new Error(
          `Failed to create provider instance for type: ${provider.apiType}, provider: ${provider.name}`,
        );
      }

      this.providerInstMap.set(providerId, providerInst);
      logger.info("Provider instance created successfully", {
        providerId,
        providerName: provider.name,
        providerApiType: provider.apiType,
      });
    }

    return providerInst;
  }

  private async getStreamConfig(): Promise<StreamSmootherConfig> {
    const [enabled, speed] = await Promise.all([
      this.settingsService.getStreamSmootherEnabled(),
      this.settingsService.getStreamSpeed(),
    ]);

    const speedMultipliers = {
      slow: 0.5, // 50% speed
      normal: 1.0, // 100% speed
      fast: 2.0, // 200% speed
    };

    const multiplier = speedMultipliers[speed];

    return {
      ...DEFAULT_SMOOTHER_CONFIG,
      enabled,
      baseSpeed: DEFAULT_SMOOTHER_CONFIG.baseSpeed * multiplier,
      minSpeed: DEFAULT_SMOOTHER_CONFIG.minSpeed * multiplier,
      maxSpeed: DEFAULT_SMOOTHER_CONFIG.maxSpeed * multiplier,
    };
  }

  /**
   * Resolve a provider record by id, preferring the in-memory cache.
   *
   * On a cache miss the ENTIRE cache (records and instances) is cleared and
   * rebuilt from the database — this deliberately repairs any cache drift
   * rather than fetching just the one missing provider. Throws when the
   * provider exists in neither cache nor database.
   */
  private async getProviderById(providerId: string): Promise<Provider> {
    let provider = this.providerMap.get(providerId);

    if (!provider) {
      logger.warn(
        "Provider not found in cache, attempting to reload all providers from database",
        {
          providerId,
          cachedProviders: Array.from(this.providerMap.keys()),
          cachedProvidersCount: this.providerMap.size,
        },
      );

      try {
        // Reload all providers from database to ensure cache consistency
        const allProviders = await this.configDbService.getProviders();

        logger.info("Reloading all providers to cache", {
          providerId,
          totalProvidersInDb: allProviders.length,
          providersInDb: allProviders.map((p) => ({
            id: p.id,
            name: p.name,
            enabled: p.enabled,
          })),
        });

        // Clear and rebuild cache
        // NOTE: instances are cleared too, so live providers get fresh
        // instances built from current DB state below.
        this.providerMap.clear();
        this.providerInstMap.clear();

        // Add all providers to cache
        for (const dbProvider of allProviders) {
          this.providerMap.set(dbProvider.id, dbProvider);

          // Recreate enabled provider instances
          if (dbProvider.enabled) {
            try {
              const providerInst = this.createProviderInst(dbProvider);
              if (providerInst) {
                this.providerInstMap.set(dbProvider.id, providerInst);
              }
            } catch (error) {
              // Best-effort: a single failed instance must not abort the
              // whole cache rebuild.
              logger.warn(
                "Failed to recreate provider instance during cache rebuild",
                {
                  providerId: dbProvider.id,
                  providerName: dbProvider.name,
                  error: error instanceof Error ? error.message : String(error),
                },
              );
            }
          }
        }

        // Retry the lookup against the freshly rebuilt cache.
        provider = this.providerMap.get(providerId);

        if (provider) {
          logger.info("Provider successfully recovered from database", {
            providerId,
            providerName: provider.name,
            providerEnabled: provider.enabled,
            totalCachedProviders: this.providerMap.size,
          });
        }
      } catch (error) {
        logger.error("Failed to reload providers from database", {
          providerId,
          error: error instanceof Error ? error.message : String(error),
        });
      }
    }

    if (!provider) {
      throw new Error(
        `Provider ${providerId} not found in cache or database. Available providers: ${Array.from(this.providerMap.keys()).join(", ")}`,
      );
    }

    return provider;
  }

  /**
   * Validate cache health and auto-repair if needed.
   * An empty provider cache is treated as unhealthy and triggers a full
   * reload from the database; failures are logged, never thrown.
   */
  private async validateProviderCache(): Promise<void> {
    if (this.providerMap.size > 0) {
      return;
    }
    try {
      logger.warn("Provider cache is empty, rebuilding from database");
      await this.initProviders();
    } catch (error) {
      logger.error("Failed to validate provider cache", { error });
    }
  }

  /**
   * IPC handler: verify a provider's API key by creating a transient
   * service instance and delegating the check. Never throws; failures are
   * reported through the returned errorMsg.
   */
  @ServiceHandler(CommunicationWay.RENDERER_TO_MAIN__TWO_WAY)
  async checkApiKey(
    _event: Electron.IpcMainEvent,
    provider: Provider,
  ): Promise<{
    isOk: boolean;
    errorMsg: string | null;
  }> {
    try {
      const inst = this.createProviderInst(provider);
      if (!inst) {
        return {
          isOk: false,
          errorMsg: `Unsupported provider type: ${provider.apiType}`,
        };
      }

      const result = await inst.checkApiKey();
      return { isOk: result.isOk, errorMsg: result.errorMsg };
    } catch (error) {
      return { isOk: false, errorMsg: extractErrorMessage(error) };
    }
  }

  /**
   * IPC handler: fetch the model list from a provider. Errors are logged
   * and reported to the renderer as an empty list rather than propagated.
   */
  @ServiceHandler(CommunicationWay.RENDERER_TO_MAIN__TWO_WAY)
  async fetchModels(
    _event: Electron.IpcMainEvent,
    provider: Provider,
  ): Promise<CreateModelData[]> {
    try {
      const providerInst = await this.getProviderInst(provider.id);
      return await providerInst.fetchModels();
    } catch (error) {
      logger.error("Failed to fetch models", {
        providerId: provider.id,
        error,
      });
      return [];
    }
  }

  /**
   * IPC handler: run one streaming chat turn for a thread.
   *
   * Flow: validate cache health → create the assistant message → fetch MCP
   * tools for the thread → run either the reflection-action loop (when MCP
   * tools exist) or the plain tool-calling loop → finalize the message
   * status and emit a COMPLETED / FAILED / CANCELLED event.
   *
   * Returns { success, error? }; an AbortError is treated as a successful
   * user-initiated cancellation, not a failure.
   */
  @ServiceHandler(CommunicationWay.RENDERER_TO_MAIN__TWO_WAY)
  async startStreamChat(
    _event: Electron.IpcMainEvent,
    params: StreamChatParams,
  ): Promise<{ success: boolean; error?: string }> {
    const { threadId, messages, userMessageId, provider, model, regeneration } =
      params;

    // Kept outside the try so the catch block can update the message status.
    let assistantMessage: Message | null = null;
    let conversationMessages = [...messages] as ChatMessage[];

    try {
      // Validate provider cache health before starting stream
      await this.validateProviderCache();

      // Add detailed logging for debugging streaming chat issues
      logger.info("Starting stream chat", {
        threadId,
        userMessageId,
        providerId: provider.id,
        providerName: provider.name,
        providerApiType: provider.apiType,
        providerEnabled: provider.enabled,
        modelId: model.id,
        modelName: model.name,
        messageCount: messages.length,
        cachedProviders: this.providerMap.size,
      });

      // Check if provider exists and is enabled
      if (!provider.enabled) {
        throw new Error(`Provider ${provider.name} is not enabled`);
      }

      const providerInst = await this.getProviderInst(provider.id);
      const abortController = createAbortController(threadId);

      const newMessages = await convertMessagesToModelMessages(
        messages,
        userMessageId,
      );
      conversationMessages = newMessages as ChatMessage[];

      assistantMessage = await this.chatService.createAssistantMessage({
        threadId,
        providerId: provider.id,
        parentMessageId: userMessageId,
        modelId: model.id,
        modelName: model.name,
      });

      sendToMain(EventNames.PROVIDER_CONVERSATION_CREATED, {
        threadId,
      });

      // Get MCP server IDs for this thread
      const mcpServerIds = await this.getMcpServerIdsForThread(threadId);

      // Fetch MCP tools for this thread
      let mcpTools: OpenAITools = [];
      if (mcpServerIds.length > 0) {
        try {
          mcpTools =
            await this.mcpService.getMcpToolsAsOpenAIFunctions(mcpServerIds);
          logger.info("Fetched MCP tools for thread", {
            threadId,
            toolCount: mcpTools.length,
            toolNames: mcpTools.map((t) => t.function.name),
          });
        } catch (error) {
          // Best-effort: tool discovery failures degrade to a plain chat.
          logger.warn(
            "Failed to fetch MCP tools for thread, continuing without tools",
            {
              threadId,
              error,
            },
          );
        }
      }

      // Start the tool calling loop - use reflection-action loop for MCP tools
      const useReflectionActionLoop = mcpTools.length > 0; // Use new approach when MCP tools are available

      const finalContent = useReflectionActionLoop
        ? await this.handleReflectionActionLoop(
            providerInst,
            conversationMessages,
            mcpTools,
            mcpServerIds,
            assistantMessage,
            abortController,
            { threadId, model },
          )
        : await this.handleToolCallingLoop(
            providerInst,
            conversationMessages,
            mcpTools,
            mcpServerIds,
            assistantMessage,
            abortController,
            { threadId, model },
          );

      if (!finalContent || finalContent.trim() === "") {
        // If no content but there were tool calls, this might be expected
        const hasToolCalls =
          assistantMessage.toolCalls &&
          JSON.parse(assistantMessage.toolCalls || "[]").length > 0;

        if (!hasToolCalls) {
          await this.chatService.updateMessage(assistantMessage.id, {
            status: "error",
          });
          sendToMain(EventNames.PROVIDER_CONVERSATION_FAILED, {
            threadId,
          });
          return {
            success: false,
            error: "No content received from provider",
          };
        }
      }

      await this.chatService.updateMessage(assistantMessage.id, {
        status: "success",
      });
      sendToMain(EventNames.PROVIDER_CONVERSATION_COMPLETED, {
        threadId,
        regeneration,
      });

      logger.info("Stream chat completed", { threadId });

      return {
        success: true,
      };
    } catch (error) {
      // Failed before the assistant message existed — nothing to finalize.
      if (!assistantMessage) {
        return {
          success: false,
          error: extractErrorMessage(error),
        };
      }

      // User-initiated cancellation: mark the message "stop" and succeed.
      if (error instanceof Error && error.name === "AbortError") {
        logger.info("Stream aborted", { threadId });

        await this.chatService.updateMessage(assistantMessage.id, {
          status: "stop",
        });
        sendToMain(EventNames.PROVIDER_CONVERSATION_CANCELLED, {
          threadId,
        });
        return { success: true };
      }

      logger.error("Stream chat error", { threadId, error });

      await this.chatService.updateMessage(assistantMessage.id, {
        status: "error",
      });
      sendToMain(EventNames.PROVIDER_CONVERSATION_FAILED, {
        threadId,
      });

      return {
        success: false,
        error: extractErrorMessage(error),
      };
    } finally {
      // Always release the per-thread abort controller.
      cleanupAbortController(threadId);
    }
  }

  @ServiceHandler(CommunicationWay.RENDERER_TO_MAIN__TWO_WAY)
  async stopStreamChat(
    _event: Electron.IpcMainEvent,
    params: { threadId: string },
  ): Promise<{ success: boolean }> {
    const { threadId } = params;
    const aborted = abortStream(threadId);
    logger.info("Stream chat stop requested", { threadId, aborted });

    if (aborted) {
      try {
        const messages =
          await this.messageService._getMessagesByThreadId(threadId);
        const pendingAssistantMessage = messages
          .filter(
            (msg: Message) =>
              msg.role === "assistant" && msg.status === "pending",
          )
          .pop();

        if (!pendingAssistantMessage) {
          logger.warn("No pending assistant message found", { threadId });
          return { success: true };
        }

        await this.chatService.updateMessage(pendingAssistantMessage.id, {
          status: "stop",
        });
        sendToMain(EventNames.PROVIDER_CONVERSATION_CANCELLED, {
          threadId,
        });

        logger.info("Updated pending message status to stop", {
          threadId,
          messageId: pendingAssistantMessage.id,
        });
      } catch (error) {
        logger.error("Failed to update pending message status on stream stop", {
          threadId,
          error,
        });
      }
    }

    return { success: true };
  }

  /**
   * Generate a conversation title via the provider's summary endpoint.
   * Errors (including provider-instance failures) are mapped into the
   * result object instead of being thrown.
   */
  async summaryTitle(
    modelName: string,
    providerId: string,
    messages: Message[],
  ): Promise<{
    isOk: boolean;
    errorMsg: string | null;
    title: string;
  }> {
    try {
      const providerInst = await this.getProviderInst(providerId);
      const { isOk, errorMsg, title } = await providerInst.summaryTitle(
        messages,
        modelName,
      );
      return { isOk, errorMsg, title };
    } catch (error) {
      const errorMsg =
        error instanceof Error
          ? error.message
          : `Provider instance error: ${providerId}`;
      return { isOk: false, errorMsg, title: "" };
    }
  }

  /**
   * Re-run a specific MCP tool call for a message
   * - Finds the original tool call by toolCallId from sequential content
   * - Executes the tool again using the original name and arguments
   * - UI updates are handled by existing TOOL_CALL_* event listeners
   */
  @ServiceHandler(CommunicationWay.RENDERER_TO_MAIN__ONE_WAY)
  async rerunToolCall(
    _event: Electron.IpcMainEvent,
    threadId: string,
    messageId: string,
    toolCallId: string,
  ): Promise<void> {
    try {
      // 1) 获取线程、Provider、Model
      const thread = await this.threadDbService.getThreadById(threadId);
      if (!thread) {
        logger.warn("Thread not found for rerunToolCall", { threadId });
        return;
      }
      // Provider fallback: cache -> DB
      let provider = this.providerMap.get(thread.providerId);
      if (!provider) {
        try {
          const providers = await this.configDbService.getProviders();
          provider =
            providers.find((p) => p.id === thread.providerId) || undefined;
          if (provider) this.providerMap.set(provider.id, provider);
        } catch {}
      }
      // Model fallback: DB -> thread params stub
      const modelRecord = await this.modelService._getModelById(thread.modelId);
      let modelForRun: import("@shared/triplit/types").Model | null = null;
      if (modelRecord) {
        modelForRun = {
          id: modelRecord.id,
          name: modelRecord.name,
          providerId: modelRecord.providerId,
          custom: modelRecord.custom,
          enabled: modelRecord.enabled,
          collected: modelRecord.collected,
          remark: modelRecord.remark,
          type: "language",
          capabilities: new Set<string>(),
        } as import("@shared/triplit/types").Model;
      } else {
        try {
          const { modelName } =
            await this.threadDbService.getTitleSummaryParams(threadId);
          modelForRun = {
            id: thread.modelId,
            name: modelName,
            providerId: thread.providerId,
            custom: false,
            enabled: true,
            collected: false,
            remark: "",
            type: "language",
            capabilities: new Set<string>(),
          } as import("@shared/triplit/types").Model;
        } catch {}
      }
      if (!provider || !modelForRun) {
        logger.warn("Provider or Model not found for rerunToolCall", {
          threadId,
          providerId: thread.providerId,
          modelId: thread.modelId,
        });
        return;
      }

      // 2) 找到包含该 toolCall 的助手消息索引（优先用 messageId，其次回退到字段扫描）
      const allMessages =
        await this.messageService._getMessagesByThreadId(threadId);

      // 2.1 直接通过传入的 messageId 定位（最可靠，因为 UI 已提供宿主消息 ID）
      let assistantIndex = allMessages.findIndex((m) => m.id === messageId);

      // 2.2 回退：扫描旧结构中的 toolCalls 字段
      if (assistantIndex === -1) {
        assistantIndex = allMessages.findIndex((m) => {
          if (m.role !== "assistant") return false;
          if (!m.toolCalls) return false;
          try {
            const tcs = JSON.parse(m.toolCalls) as ToolCallResult[];
            return tcs.some((tc) => tc.id === toolCallId);
          } catch {
            return false;
          }
        });
      }

      // 2.3 回退：扫描顺序内容 contentBlocks（反思-工具-反思工作流使用该字段）
      if (assistantIndex === -1) {
        assistantIndex = allMessages.findIndex((m) => {
          if (m.role !== "assistant" || !m.contentBlocks) return false;
          try {
            const sequentialContent = JSON.parse(
              m.contentBlocks,
            ) as import("@shared/types/sequential-content").SequentialMessageContent;
            return sequentialContent.blocks.some((b) => {
              if (b.type !== "tool_call") return false;
              const block =
                b as import("@shared/types/sequential-content").ToolCallContentBlock;
              return block.toolCall?.id === toolCallId;
            });
          } catch {
            return false;
          }
        });
      }
      if (assistantIndex === -1) {
        logger.warn("Assistant message containing tool call not found", {
          threadId,
          messageId,
          toolCallId,
        });
        return;
      }

      // 3) 构造重新开始的对话上下文（仅保留该助手消息之前的消息）
      const priorDbMessages = allMessages.slice(0, assistantIndex);

      const currentMessages: ChatMessage[] = priorDbMessages.map((m) => ({
        role: m.role as ChatMessage["role"],
        content: m.content || "",
        id: m.id,
      }));

      // 4) 删除该助手消息之后的所有消息（保留本条助手消息），避免旧的顺序块残留
      const messagesToDelete = allMessages.slice(assistantIndex + 1);
      for (const msg of messagesToDelete) {
        try {
          await this.messageService.deleteMessage(
            {} as Electron.IpcMainEvent,
            msg.id,
            threadId,
          );
        } catch (err) {
          logger.warn("Failed to delete message during rerun", {
            messageId: msg.id,
            error: this.extractErrorMessage(err),
          });
        }
      }

      // 5) 在保留该助手消息的前置 part 情况下：修剪该消息的顺序内容到目标 toolcall 之前
      const assistantMessage = allMessages[assistantIndex];

      // Set assistant message status to pending to hide action buttons during rerun
      // await this.chatService.(assistantMessage.id, {
      //   status: "pending",
      // });

      // Send stream status update directly to avoid fetchMessages call that could overwrite streaming state
      sendToThread(threadId, EventNames.CHAT_STREAM_STATUS_UPDATE, {
        threadId,
        status: "initializing",
      });

      let toolName = "";
      let toolArgs = "{}";
      try {
        if (assistantMessage.contentBlocks) {
          const sc = JSON.parse(
            assistantMessage.contentBlocks,
          ) as import("@shared/types/sequential-content").SequentialMessageContent;
          const targetBlock = sc.blocks.find(
            (b) =>
              b.type === "tool_call" &&
              (
                b as import("@shared/types/sequential-content").ToolCallContentBlock
              ).toolCall.id === toolCallId,
          ) as
            | import("@shared/types/sequential-content").ToolCallContentBlock
            | undefined;
          if (targetBlock) {
            // 保存原始工具名和参数
            toolName = targetBlock.toolCall.name;
            toolArgs = targetBlock.toolCall.arguments;

            // 修剪：仅保留目标之前的块
            const keptBlocks = sc.blocks.filter(
              (b) => b.order < targetBlock.order,
            );
            const trimmedContent = {
              ...sc,
              blocks: keptBlocks,
              isComplete: false,
              currentBlockId: keptBlocks.at(-1)?.id,
            };
            await this.chatService.updateMessage(assistantMessage.id, {
              contentBlocks: JSON.stringify(trimmedContent),
            });
            // 立刻通知前端用保留的块替换渲染，避免旧内容与新内容并存
            sendToRenderer(EventNames.CONTENT_BLOCK_UPDATE, {
              threadId,
              messageId: assistantMessage.id,
              blockId: "reset",
              updateData: { replaceBlocks: keptBlocks },
            });
          }
        }
      } catch (e) {
        logger.warn("Failed to trim contentBlocks before rerun", {
          error: this.extractErrorMessage(e),
        });
      }

      // 6) 重新执行当前工具，并继续后续反思-工具循环，所有新块将追加到同一条助手消息
      const providerInst = await this.getProviderInst(provider.id);
      const mcpServerIds = await this.getMcpServerIdsForThread(threadId);

      // 如果未能从 contentBlocks 获取到工具信息，则从旧字段回退尝试
      if (!toolName || !toolArgs) {
        if (assistantMessage.toolCalls) {
          try {
            const tcs = JSON.parse(
              assistantMessage.toolCalls,
            ) as ToolCallResult[];
            const tc = tcs.find((t) => t.id === toolCallId);
            if (tc) {
              toolName = tc.name;
              toolArgs = tc.arguments;
            }
          } catch {}
        }
      }

      // 构造并执行工具调用（会创建新的 tool_call 块并更新顺序内容）
      if (toolName) {
        // Send in-progress event with empty delta to trigger stream immediately
        sendToMain(EventNames.PROVIDER_CONVERSATION_IN_PROGRESS, {
          threadId,
          delta: "", // Empty delta to trigger stream without adding content
        });

        const toolCall: OpenAIToolCall = {
          id: toolCallId,
          type: "function",
          function: { name: toolName, arguments: toolArgs },
        };
        await this.executeToolCallsAndUpdateConversation(
          [toolCall],
          assistantMessage,
          threadId,
          mcpServerIds,
          currentMessages,
        );
      }

      // 继续后续反思-工具循环（不重新初始化顺序内容）
      {
        const availableToolNames = (
          (await this.mcpService.getMcpToolsAsOpenAIFunctions(mcpServerIds)) ||
          []
        ).map((t) => t.function.name);
        const systemPrompt = {
          id: nanoid(),
          role: "system" as const,
          content: getMcpReflectionSystemPrompt(availableToolNames),
        };
        const abortController2 = createAbortController(threadId);
        try {
          let loopCount = 0;
          const maxLoops = 10;
          while (loopCount < maxLoops) {
            loopCount++;
            const reflectionBlockId =
              await this.sequentialContentManager.addReflectionBlock(
                assistantMessage.id,
                threadId,
              );

            const renderedMessages =
              await this.messageService.renderMessagesByTemplate(
                currentMessages,
              );
            const messagesWithPrompt = [systemPrompt, ...renderedMessages];
            const streamParams: StreamChatParams = {
              tabId: "",
              threadId,
              userMessageId: assistantMessage.id,
              messages: await this.threadFormsService.renderTemplateByThreadId(
                threadId,
                messagesWithPrompt,
              ),
              model: modelForRun,
              provider: {} as Provider,
              tools:
                await this.mcpService.getMcpToolsAsOpenAIFunctions(
                  mcpServerIds,
                ),
              chatParameters:
                await this.threadFormsService.getChatParametersByThreadId(
                  threadId,
                ),
            };
            const result = await this.startStreamChatForMcp(
              providerInst,
              streamParams,
              abortController2,
            );
            const { toolCalls } =
              await this.processStreamWithReflectionAndActions(
                result,
                assistantMessage,
                reflectionBlockId,
                abortController2,
                threadId,
                mcpServerIds,
                currentMessages,
              );
            if (!toolCalls || toolCalls.length === 0) {
              break;
            }
          }
          await this.sequentialContentManager.completeSequentialContent(
            assistantMessage.id,
            threadId,
          );
          await this.chatService.updateMessage(assistantMessage.id, {
            status: "success",
          });
          sendToMain(EventNames.PROVIDER_CONVERSATION_COMPLETED, {
            threadId,
            regeneration: true,
          });
        } catch {
          await this.chatService.updateMessage(assistantMessage.id, {
            status: "error",
          });
          sendToMain(EventNames.PROVIDER_CONVERSATION_FAILED, { threadId });
        } finally {
          cleanupAbortController(threadId);
        }
      }
    } catch (error) {
      logger.error("Failed to rerun tool call", {
        threadId,
        messageId,
        toolCallId,
        error: this.extractErrorMessage(error),
      });
    }
  }

  /**
   * Resolve the MCP server IDs associated with a thread.
   *
   * Resolution order:
   * 1. Thread-server associations persisted in the database (detected via an
   *    MCP tools lookup for the thread).
   * 2. `selectedMcpServerIds` carried on the latest message event (covers new
   *    threads that have no stored associations yet).
   * 3. An empty list when neither source yields servers or the lookup fails.
   */
  private async getMcpServerIdsForThread(threadId: string): Promise<string[]> {
    try {
      // Database-backed associations take precedence. The IPC event argument
      // is not used by the lookup, hence the empty placeholder object.
      const associatedTools = await this.mcpService.getMcpToolsForThread(
        {} as Electron.IpcMainEvent,
        threadId,
      );
      if (associatedTools.length > 0) {
        return await this.mcpService.getServerIdsForThread(threadId);
      }

      // No stored associations — fall back to server IDs selected on the most
      // recent message event for this thread.
      const latestEvent = this.getLatestMessageEventData(threadId);
      const selectedIds = latestEvent?.selectedMcpServerIds;
      if (selectedIds && selectedIds.length > 0) {
        logger.info("Using MCP servers from message event for new thread", {
          threadId,
          serverIds: selectedIds,
        });
        return selectedIds;
      }

      return [];
    } catch (error) {
      logger.warn("Failed to get MCP server IDs for thread", {
        threadId,
        error,
      });
      return [];
    }
  }

  /**
   * Handle the tool calling loop with streaming support.
   *
   * Streams a completion and, while the model keeps returning tool calls:
   * persists the calls on the assistant message, executes them through the
   * MCP tool executor, appends the assistant/tool messages to the running
   * conversation, and streams again — up to `maxLoops` iterations.
   *
   * @param providerInst - Provider that opens each chat stream.
   * @param conversationMessages - Conversation history to seed the loop with.
   * @param mcpTools - OpenAI-format tool definitions exposed to the model.
   * @param mcpServerIds - MCP servers that may execute the requested tools.
   * @param assistantMessage - Assistant message record updated with tool calls.
   * @param abortController - Aborting raises an `AbortError` out of the loop.
   * @param context - The owning thread ID and the model to stream with.
   * @returns All streamed assistant text content, concatenated across loops.
   * @throws Error named "AbortError" when the signal is aborted mid-loop.
   */
  private async handleToolCallingLoop(
    providerInst: BaseProviderService,
    conversationMessages: ChatMessage[],
    mcpTools: OpenAITools,
    mcpServerIds: string[],
    assistantMessage: Message,
    abortController: AbortController,
    context: { threadId: string; model: Model },
  ): Promise<string> {
    const { threadId, model } = context;
    let fullContent = "";
    // Render message templates and apply the thread's system prompt once,
    // before entering the loop; tool call/result messages are appended to
    // `currentMessages` as the loop progresses.
    const renderedMessages =
      await this.messageService.renderMessagesByTemplate(conversationMessages);
    const finalMessages =
      await this.threadFormsService.renderMessagesBySystemPrompt(
        threadId,
        renderedMessages,
      );
    const currentMessages = [...finalMessages];

    let loopCount = 0;
    const maxLoops = 10; // Prevent infinite loops

    while (loopCount < maxLoops) {
      if (abortController.signal.aborted) {
        const abortError = new Error("Tool calling loop aborted by user");
        abortError.name = "AbortError";
        throw abortError;
      }

      loopCount++;
      logger.info("Tool calling loop iteration", { threadId, loopCount });

      // Create stream parameters
      const streamParams: StreamChatParams = {
        tabId: "", // Will be filled by caller if needed
        threadId,
        userMessageId: assistantMessage.id,
        messages: await this.threadFormsService.renderTemplateByThreadId(
          threadId,
          currentMessages,
        ),
        model,
        provider: {} as Provider, // Will be filled by caller if needed
        tools: mcpTools,
        chatParameters:
          await this.threadFormsService.getChatParametersByThreadId(threadId),
      };

      sendToMain(EventNames.PROVIDER_CONVERSATION_IN_PROGRESS, {
        threadId,
      });

      // Get the streaming response
      const result = await providerInst.startStreamChat(
        streamParams,
        abortController,
      );

      // Process the stream and collect content and tool calls
      const { content, toolCalls } = await this.processStreamWithToolCalls(
        result,
        abortController,
        threadId,
      );

      // Update full content
      fullContent += content;

      // If no tool calls, we're done
      if (!toolCalls || toolCalls.length === 0) {
        logger.info("No tool calls found, ending loop", {
          threadId,
          loopCount,
        });
        break;
      }

      // Update assistant message with tool calls (initial statuses) so the UI
      // can show pending/awaiting-approval state before execution completes.
      await this.updateMessageWithToolCalls(assistantMessage.id, toolCalls);

      // Execute tool calls
      logger.info("Executing tool calls", {
        threadId,
        toolCallCount: toolCalls.length,
        toolNames: toolCalls.map((tc) => tc.function.name),
      });

      let toolResults: ToolCallResult[] = [];
      try {
        toolResults = await this.mcpToolExecutor.executeToolCalls(
          toolCalls,
          threadId,
          assistantMessage.id,
          mcpServerIds,
        );
      } catch (error) {
        // Deliberate best-effort: a failed executor run is converted into
        // per-call error results so the conversation can still continue.
        logger.error(
          "Failed to execute tool calls, continuing with error results",
          {
            threadId,
            error: this.extractErrorMessage(error),
          },
        );

        // Create error results for all tool calls
        toolResults = toolCalls.map((tc) => ({
          id: tc.id,
          name: tc.function.name,
          arguments: tc.function.arguments,
          status: "error" as const,
          error: `Tool execution failed: ${this.extractErrorMessage(error)}`,
        }));
      }

      // Update message with tool call results
      await this.updateMessageWithToolCalls(
        assistantMessage.id,
        toolCalls,
        toolResults,
      );

      // Add assistant message with tool calls to conversation
      const toolCallMessage: ChatMessage & { tool_calls: OpenAIToolCall[] } = {
        id: nanoid(),
        role: "assistant",
        content: content || "",
        tool_calls: toolCalls,
      };
      currentMessages.push(toolCallMessage);

      // Add tool result messages to conversation
      for (const toolResult of toolResults) {
        const toolResultMessage: ChatMessage = {
          id: nanoid(),
          role: "tool",
          tool_call_id: toolResult.id,
          content:
            toolResult.status === "success"
              ? toolResult.result || "Tool executed successfully"
              : `Error: ${toolResult.error || "Tool execution failed"}`,
        };
        currentMessages.push(toolResultMessage);
      }

      // Continue the loop to get AI's response to tool results
      logger.info("Tool calls executed, continuing conversation", {
        threadId,
        loopCount,
        successfulCalls: toolResults.filter((r) => r.status === "success")
          .length,
        failedCalls: toolResults.filter((r) => r.status === "error").length,
      });
    }

    if (loopCount >= maxLoops) {
      logger.warn("Tool calling loop reached maximum iterations", {
        threadId,
        maxLoops,
      });
    }

    return fullContent;
  }

  /**
   * Process streaming response and extract content and tool calls.
   *
   * Text deltas are fed through a StreamSmoother so the UI receives smooth
   * incremental updates; tool-call deltas are accumulated into complete
   * `OpenAIToolCall` objects keyed by their stream `index`.
   *
   * @param stream - OpenAI chat-completion chunk stream.
   * @param abortController - Aborting stops smoothing and throws `AbortError`.
   * @param threadId - Thread the streamed deltas are emitted to.
   * @returns Accumulated text content and the assembled tool calls, or
   *   `toolCalls: null` when the model emitted none.
   * @throws Error named "AbortError" when the signal aborts during streaming.
   */
  private async processStreamWithToolCalls(
    stream: Stream<OpenAI.Chat.Completions.ChatCompletionChunk>,
    abortController: AbortController,
    threadId: string,
  ): Promise<{ content: string; toolCalls: OpenAIToolCall[] | null }> {
    let content = "";
    let toolCalls: OpenAIToolCall[] | null = null;
    let hasDetectedToolCalls = false; // Track if we've already detected tool calls

    // Get dynamic stream configuration
    const streamConfig = await this.getStreamConfig();

    // Initialize StreamSmoother for smooth output
    const streamSmoother = new StreamSmoother(
      async (smoothedChunk: string) => {
        // Check if request was aborted
        if (abortController.signal.aborted) {
          streamSmoother.stop();
          return;
        }

        content += smoothedChunk;

        sendToMain(EventNames.PROVIDER_CONVERSATION_IN_PROGRESS, {
          threadId,
          delta: smoothedChunk,
        });
      },
      streamConfig,
      abortController.signal,
    );

    abortController.signal.addEventListener("abort", () => {
      streamSmoother.stop();
    });

    try {
      for await (const chunk of stream) {
        if (abortController.signal.aborted) {
          const abortError = new Error("Stream aborted by user");
          abortError.name = "AbortError";
          throw abortError;
        }

        // Check for content delta
        const delta = chunk.choices[0]?.delta?.content || "";
        if (delta) {
          streamSmoother.addChunk(delta);
        }

        // Check for tool calls
        if (chunk.choices[0]?.delta?.tool_calls) {
          // CRITICAL: Flush StreamSmoother buffer when first tool call is detected
          // This ensures any buffered content gets output before tool processing begins
          if (!hasDetectedToolCalls) {
            logger.info(
              "First tool call detected, flushing remaining content",
              {
                threadId,
                bufferStats: streamSmoother.getStats(),
              },
            );

            streamSmoother.flush();
            hasDetectedToolCalls = true;

            // Small delay to ensure flush completes
            await new Promise((resolve) => setTimeout(resolve, 10));
          }

          if (!toolCalls) {
            toolCalls = [];
          }

          // Process tool call deltas
          const toolCallDeltas = chunk.choices[0].delta.tool_calls;
          for (const toolCallDelta of toolCallDeltas) {
            const index = toolCallDelta.index;

            // Initialize tool call if not exists
            if (!toolCalls[index]) {
              toolCalls[index] = {
                id: toolCallDelta.id || "",
                type: "function",
                function: {
                  name: toolCallDelta.function?.name || "",
                  arguments: "",
                },
              };
            } else {
              // FIX: some providers deliver the id and/or function name in a
              // later chunk than the one that opened this index. Previously
              // those values were dropped, leaving id/name as "" — backfill
              // them whenever they appear.
              if (toolCallDelta.id) {
                toolCalls[index].id = toolCallDelta.id;
              }
              if (toolCallDelta.function?.name) {
                toolCalls[index].function.name = toolCallDelta.function.name;
              }
            }

            // Accumulate function arguments
            if (toolCallDelta.function?.arguments) {
              toolCalls[index].function.arguments +=
                toolCallDelta.function.arguments;
            }
          }
        }

        // Check for complete tool calls in finish_reason
        if (chunk.choices[0]?.finish_reason === "tool_calls") {
          logger.info("Stream finished with tool calls", { threadId });
          break;
        }
      }

      if (abortController.signal.aborted) {
        const abortError = new Error("Stream aborted by user after completion");
        abortError.name = "AbortError";
        throw abortError;
      }

      // Complete the stream smoother
      await new Promise<void>((resolve) => {
        streamSmoother.complete(() => {
          resolve();
        });
      });
    } catch (error) {
      streamSmoother.stop();
      logger.error("Stream processing error", { threadId, error });
      throw error;
    }

    return { content, toolCalls };
  }

  /**
   * Update message with tool calls information.
   *
   * When `toolResults` is provided it is persisted as-is. Otherwise an initial
   * `ToolCallResult` is synthesized per tool call, with status
   * "awaiting_approval" when the owning MCP server requires user approval,
   * else "pending".
   *
   * @param messageId - Message whose `toolCalls` field is updated.
   * @param toolCalls - Tool calls emitted by the model.
   * @param toolResults - Optional final results to store instead of the
   *   synthesized initial statuses.
   */
  private async updateMessageWithToolCalls(
    messageId: string,
    toolCalls: OpenAIToolCall[],
    toolResults?: ToolCallResult[],
  ): Promise<void> {
    let toolCallResults: ToolCallResult[];

    if (toolResults) {
      // Use provided results
      toolCallResults = toolResults;
    } else {
      // FIX: resolve the thread and candidate servers once — they are
      // identical for every tool call on the same message. The previous
      // version re-fetched them inside the map for each call.
      const threadId = await this.getThreadIdForMessage(messageId);
      const mcpServerIds =
        this.getLatestMessageEventData(threadId)?.selectedMcpServerIds ?? [];

      // Create initial results with proper status determination
      toolCallResults = await Promise.all(
        toolCalls.map(async (tc) => {
          // Get MCP server for this tool to determine initial status
          const mcpServerId = await this.findServerForTool(
            tc.function.name,
            mcpServerIds,
          );
          let initialStatus: "pending" | "awaiting_approval" = "pending";

          if (mcpServerId) {
            const requiresApproval =
              await this.checkToolRequiresApproval(mcpServerId);
            initialStatus = requiresApproval ? "awaiting_approval" : "pending";
          }

          return {
            id: tc.id,
            name: tc.function.name,
            arguments: tc.function.arguments,
            status: initialStatus,
            mcpServerId,
            requiresApproval: initialStatus === "awaiting_approval",
          } as ToolCallResult;
        }),
      );
    }

    await this.chatService.updateMessage(messageId, {
      toolCalls: JSON.stringify(toolCallResults),
    });
  }

  /**
   * Start stream chat with MCP-specific configuration
   * For 302AI providers, this disables URL parsing to preserve tool results
   */
  private async startStreamChatForMcp(
    providerInst: BaseProviderService,
    streamParams: StreamChatParams,
    abortController: AbortController,
  ): Promise<OpenAIStreamResponse> {
    // Check if this is a provider that supports MCP-specific features
    if (
      "startStreamChatForMcp" in providerInst &&
      typeof providerInst.startStreamChatForMcp === "function"
    ) {
      const mcpProvider = providerInst as BaseProviderService &
        McpSupportedProvider;
      return await mcpProvider.startStreamChatForMcp(
        streamParams,
        abortController,
      );
    }

    // For other providers, use the regular method
    return await providerInst.startStreamChat(streamParams, abortController);
  }

  /**
   * Handle the MCP reflection->action loop with sequential content blocks.
   *
   * Each iteration appends a "reflection" block to the message's sequential
   * content, streams the model's reflection (guided by the MCP reflection
   * system prompt), and lets `processStreamWithReflectionAndActions` execute
   * any tool calls inline, pushing their results onto `messagesWithPrompt` so
   * the next iteration sees them. The loop ends when a stream finishes with
   * no tool calls or after `maxLoops` iterations.
   *
   * @param providerInst - Provider that opens each chat stream.
   * @param conversationMessages - Conversation history to seed the loop with.
   * @param mcpTools - OpenAI-format tool definitions exposed to the model.
   * @param mcpServerIds - MCP servers that may execute the requested tools.
   * @param assistantMessage - Assistant message whose sequential content is
   *   built up during the loop.
   * @param abortController - Aborting raises an `AbortError` out of the loop.
   * @param context - The owning thread ID and the model to stream with.
   * @returns Concatenated reflection text across all iterations.
   * @throws Error named "AbortError" when the signal is aborted mid-loop.
   */
  private async handleReflectionActionLoop(
    providerInst: BaseProviderService,
    conversationMessages: ChatMessage[],
    mcpTools: OpenAITools,
    mcpServerIds: string[],
    assistantMessage: Message,
    abortController: AbortController,
    context: { threadId: string; model: Model },
  ): Promise<string> {
    const { threadId, model } = context;
    let fullContent = "";
    const currentMessages = [...conversationMessages];
    let loopCount = 0;
    const maxLoops = 10; // Prevent infinite loops

    // Initialize sequential content for this message
    await this.sequentialContentManager.initializeSequentialContent(
      assistantMessage.id,
      threadId,
    );

    // Get available tool names for the system prompt
    const availableToolNames =
      mcpTools?.map((tool) => tool.function.name) || [];

    // Add MCP reflection system prompt to guide the AI
    const mcpReflectionPrompt = {
      id: nanoid(),
      role: "system" as const,
      content: getMcpReflectionSystemPrompt(availableToolNames),
    };

    // Render templates and the thread system prompt once; tool call/result
    // messages are appended to `messagesWithPrompt` by the stream processor.
    const renderedMessages =
      await this.messageService.renderMessagesByTemplate(currentMessages);
    const finalMessages =
      await this.threadFormsService.renderMessagesBySystemPrompt(
        threadId,
        renderedMessages,
      );
    const messagesWithPrompt = [mcpReflectionPrompt, ...finalMessages];

    while (loopCount < maxLoops) {
      if (abortController.signal.aborted) {
        const abortError = new Error("Reflection-action loop aborted by user");
        abortError.name = "AbortError";
        throw abortError;
      }

      loopCount++;
      logger.info("Reflection-action loop iteration", { threadId, loopCount });

      // Create a new reflection block for this iteration
      const reflectionBlockId =
        await this.sequentialContentManager.addReflectionBlock(
          assistantMessage.id,
          threadId,
        );

      // Create stream parameters
      const streamParams: StreamChatParams = {
        tabId: "",
        threadId,
        userMessageId: assistantMessage.id,
        messages: await this.threadFormsService.renderTemplateByThreadId(
          threadId,
          messagesWithPrompt,
        ),
        model,
        provider: {} as Provider,
        tools: mcpTools,
        chatParameters:
          await this.threadFormsService.getChatParametersByThreadId(threadId),
      };

      sendToMain(EventNames.PROVIDER_CONVERSATION_IN_PROGRESS, {
        threadId,
      });

      // Get the streaming response with reflection content
      const result = await this.startStreamChatForMcp(
        providerInst,
        streamParams,
        abortController,
      );

      // Process the stream with integrated tool execution
      const { reflectionContent, toolCalls } =
        await this.processStreamWithReflectionAndActions(
          result,
          assistantMessage,
          reflectionBlockId,
          abortController,
          threadId,
          mcpServerIds,
          messagesWithPrompt,
        );

      // Update the reflection block with the final content
      await this.sequentialContentManager.updateReflectionContent(
        assistantMessage.id,
        threadId,
        reflectionBlockId,
        reflectionContent,
        true, // Mark as complete
      );

      // Update full content
      fullContent += reflectionContent;

      // If no tool calls, we're done with the conversation
      if (!toolCalls || toolCalls.length === 0) {
        logger.info("No tool calls found, ending reflection-action loop", {
          threadId,
          loopCount,
        });
        break;
      }

      // Tool calls were already executed in processStreamWithReflectionAndActions
      // Just continue to the next reflection iteration
      logger.info("Tool calls executed, continuing to next reflection", {
        threadId,
        loopCount,
        toolCallCount: toolCalls.length,
      });
    }

    if (loopCount >= maxLoops) {
      logger.warn("Reflection-action loop reached maximum iterations", {
        threadId,
        maxLoops,
      });
    }

    // Mark sequential content as complete
    await this.sequentialContentManager.completeSequentialContent(
      assistantMessage.id,
      threadId,
    );

    return fullContent;
  }

  /**
   * Process streaming response with integrated tool execution.
   *
   * Streams reflection text into the block identified by `reflectionBlockId`
   * (smoothed, and mirrored onto the main message content for real-time UI
   * updates) until tool-call deltas appear; from that point only tool calls
   * are accumulated. When the stream finishes with
   * `finish_reason === "tool_calls"`, the collected calls are executed
   * immediately and their results appended to `currentMessages` (mutated in
   * place).
   *
   * @param stream - OpenAI chat-completion chunk stream.
   * @param assistantMessage - Message whose content/blocks are updated live.
   * @param reflectionBlockId - Sequential-content block receiving the text.
   * @param abortController - Aborting stops smoothing and throws `AbortError`.
   * @param threadId - Thread the streamed deltas are emitted to.
   * @param mcpServerIds - MCP servers eligible to execute tool calls.
   * @param currentMessages - Conversation history, extended with tool results.
   * @returns The streamed reflection text and the tool calls (null if none).
   * @throws Error named "AbortError" when the signal aborts during streaming.
   */
  private async processStreamWithReflectionAndActions(
    stream: Stream<OpenAI.Chat.Completions.ChatCompletionChunk>,
    assistantMessage: Message,
    reflectionBlockId: string,
    abortController: AbortController,
    threadId: string,
    mcpServerIds: string[],
    currentMessages: ChatMessage[],
  ): Promise<{
    reflectionContent: string;
    toolCalls: OpenAIToolCall[] | null;
  }> {
    let reflectionContent = "";
    let toolCalls: OpenAIToolCall[] | null = null;
    // True while text deltas still belong to the reflection; flips to false
    // permanently once the first tool-call delta arrives.
    let isProcessingReflection = true;

    // Get dynamic stream configuration
    const streamConfig = await this.getStreamConfig();

    logger.info("Stream configuration for reflection content", {
      threadId,
      streamConfig,
      reflectionBlockId,
    });

    // Initialize StreamSmoother for reflection content
    const streamSmoother = new StreamSmoother(
      (smoothedChunk: string) => {
        // Check if request was aborted
        if (abortController.signal.aborted) {
          streamSmoother.stop();
          return;
        }

        if (isProcessingReflection) {
          reflectionContent += smoothedChunk;

          // Update reflection block in real-time - use non-blocking call
          this.sequentialContentManager
            .updateReflectionContent(
              assistantMessage.id,
              threadId,
              reflectionBlockId,
              reflectionContent,
              false, // Not complete yet
            )
            .catch((error) => {
              logger.error("Failed to update reflection content", {
                error,
                threadId,
              });
            });

          // IMPORTANT: Also update the main message content for real-time streaming
          // This ensures the UI shows streaming updates even with MCP tools enabled
          this.chatService
            .updateMessage(assistantMessage.id, {
              content: reflectionContent,
            })
            .catch((error) => {
              logger.error(
                "Failed to update main message content during streaming",
                {
                  error,
                  threadId,
                  messageId: assistantMessage.id,
                },
              );
            });

          // Send streaming update event to frontend
          sendToMain(EventNames.PROVIDER_CONVERSATION_IN_PROGRESS, {
            threadId,
            delta: smoothedChunk,
          });
        }
      },
      streamConfig,
      abortController.signal,
    );

    abortController.signal.addEventListener("abort", () => {
      streamSmoother.stop();
    });

    try {
      for await (const chunk of stream) {
        if (abortController.signal.aborted) {
          const abortError = new Error("Stream aborted by user");
          abortError.name = "AbortError";
          throw abortError;
        }

        // Check for tool calls - when tool calls appear, stop processing reflection content
        if (chunk.choices[0]?.delta?.tool_calls) {
          // CRITICAL: Flush StreamSmoother buffer before switching to tool calls
          // This ensures any buffered content (like "，我会调用几个工具") gets output
          // before we stop processing reflection content
          if (isProcessingReflection) {
            logger.info(
              "Tool calls detected, flushing remaining reflection content",
              {
                threadId,
                reflectionBlockId,
                bufferStats: streamSmoother.getStats(),
              },
            );

            streamSmoother.flush();

            // Give a small delay to ensure flush completes before proceeding
            // This prevents race conditions between flush and tool execution
            await new Promise((resolve) => setTimeout(resolve, 10));
          }

          isProcessingReflection = false;

          if (!toolCalls) {
            toolCalls = [];
          }

          // Process tool call deltas
          const toolCallDeltas = chunk.choices[0].delta.tool_calls;
          for (const toolCallDelta of toolCallDeltas) {
            const index = toolCallDelta.index;

            // Initialize tool call if not exists
            // NOTE(review): if a provider sends the id/function name in a
            // later chunk than the one that opened this index, they are
            // dropped here (id/name stay "") — confirm supported providers
            // always send them on the first delta for the index.
            if (!toolCalls[index]) {
              toolCalls[index] = {
                id: toolCallDelta.id || "",
                type: "function",
                function: {
                  name: toolCallDelta.function?.name || "",
                  arguments: "",
                },
              };
            }

            // Accumulate function arguments
            if (toolCallDelta.function?.arguments) {
              toolCalls[index].function.arguments +=
                toolCallDelta.function.arguments;
            }
          }
        }

        // Process content delta only if we're still in reflection mode
        if (isProcessingReflection) {
          const delta = chunk.choices[0]?.delta?.content || "";
          if (delta) {
            streamSmoother.addChunk(delta);
          }
        }

        // Check if we're done with the current stream
        if (chunk.choices[0]?.finish_reason === "tool_calls") {
          logger.info(
            "Stream finished with tool calls - executing tools immediately",
            { threadId },
          );

          // Execute tools immediately and add results to conversation
          if (toolCalls && toolCalls.length > 0) {
            await this.executeToolCallsAndUpdateConversation(
              toolCalls,
              assistantMessage,
              threadId,
              mcpServerIds,
              currentMessages,
            );
          }
          break;
        } else if (chunk.choices[0]?.finish_reason === "stop") {
          logger.info("Stream finished without tool calls", { threadId });
          break;
        }
      }

      if (abortController.signal.aborted) {
        const abortError = new Error("Stream aborted by user after completion");
        abortError.name = "AbortError";
        throw abortError;
      }

      // Complete the stream smoother
      await new Promise<void>((resolve) => {
        streamSmoother.complete(() => {
          resolve();
        });
      });

      // Final content integrity check - ensure we have the complete reflection content
      if (isProcessingReflection && reflectionContent) {
        logger.info("Final reflection content check", {
          threadId,
          reflectionBlockId,
          contentLength: reflectionContent.length,
          contentPreview: `${reflectionContent.slice(0, 100)}...`, // Log first 100 chars for debugging
        });
      }
    } catch (error) {
      streamSmoother.stop();
      logger.error("Stream processing error in reflection-action loop", {
        threadId,
        error,
      });
      throw error;
    }

    return { reflectionContent, toolCalls };
  }

  /**
   * Execute tool calls immediately and update conversation history.
   *
   * For each tool call: resolves which MCP server exposes the tool, creates a
   * sequential-content "tool_call" block for the UI (with an initial status
   * that reflects whether the server requires user approval), runs the tool
   * through the MCP tool executor, writes the result back into the block, and
   * pushes the assistant tool-call message plus the tool result (or error)
   * message onto `currentMessages` so the next model turn can see them.
   *
   * Note: `currentMessages` is mutated in place by design.
   *
   * @param toolCalls - Tool calls emitted by the model in this turn.
   * @param assistantMessage - Message whose sequential content receives blocks.
   * @param threadId - Owning thread.
   * @param mcpServerIds - Candidate MCP servers to search for each tool.
   * @param currentMessages - Conversation history, extended with results.
   */
  private async executeToolCallsAndUpdateConversation(
    toolCalls: OpenAIToolCall[],
    assistantMessage: Message,
    threadId: string,
    mcpServerIds: string[],
    currentMessages: ChatMessage[],
  ): Promise<void> {
    logger.info("Executing tool calls immediately", {
      threadId,
      toolCallCount: toolCalls.length,
    });

    for (const toolCall of toolCalls) {
      // Find which MCP server has this tool
      let mcpServerId: string | undefined;
      logger.info("Looking for MCP server for tool", {
        toolName: toolCall.function.name,
        availableServerIds: mcpServerIds,
      });

      try {
        // Linear scan: ask each candidate server for its tool list and stop
        // at the first one that advertises this tool's name.
        for (const serverId of mcpServerIds) {
          logger.info("Checking server for tool", {
            serverId,
            toolName: toolCall.function.name,
          });

          const serverTools =
            await this.mcpService.getToolsFromServer(serverId);
          logger.info("Server tools found", {
            serverId,
            toolCount: serverTools.length,
            tools: serverTools.map((t) => t.name),
          });

          const hasTools = serverTools.some(
            (tool) => tool.name === toolCall.function.name,
          );
          if (hasTools) {
            mcpServerId = serverId;
            logger.info("Found server for tool", {
              toolName: toolCall.function.name,
              mcpServerId,
            });
            break;
          }
        }
      } catch (error) {
        // Best-effort lookup: a failing server leaves mcpServerId undefined
        // and execution still proceeds below.
        logger.warn("Failed to find server for tool during block creation", {
          toolName: toolCall.function.name,
          mcpServerIds,
          error,
        });
      }

      if (!mcpServerId) {
        logger.warn("No server found for tool", {
          toolName: toolCall.function.name,
          mcpServerIds,
        });
      }

      // Determine initial status based on server settings
      let initialStatus: "pending" | "awaiting_approval" = "pending";
      if (mcpServerId) {
        const requiresApproval =
          await this.checkToolRequiresApproval(mcpServerId);
        initialStatus = requiresApproval ? "awaiting_approval" : "pending";
      }

      // Create a tool call block for UI with proper initial status
      const toolCallBlockId =
        await this.sequentialContentManager.addToolCallBlock(
          assistantMessage.id,
          threadId,
          {
            id: toolCall.id,
            name: toolCall.function.name,
            arguments: toolCall.function.arguments,
            status: initialStatus,
            mcpServerId, // Include mcpServerId from the start
            requiresApproval: initialStatus === "awaiting_approval",
          },
        );

      // Execute the tool call
      try {
        const toolResults = await this.mcpToolExecutor.executeToolCalls(
          [toolCall],
          threadId,
          assistantMessage.id,
          mcpServerIds,
        );

        // Exactly one call was submitted, so only the first result matters.
        const toolResult = toolResults[0];

        // Update the tool call block with the result
        await this.sequentialContentManager.updateToolCallBlock(
          assistantMessage.id,
          threadId,
          toolCallBlockId,
          toolResult,
        );

        // Add the tool call and result to the conversation immediately
        const toolCallMessage: ChatMessage & { tool_calls: OpenAIToolCall[] } =
          {
            id: nanoid(),
            role: "assistant",
            content: "",
            tool_calls: [toolCall],
          };
        currentMessages.push(toolCallMessage);

        const toolResultMessage: ChatMessage = {
          id: nanoid(),
          role: "tool",
          content: toolResult.result || "Tool execution completed",
          tool_call_id: toolCall.id,
        };
        logger.info("Tool result message", {
          toolResultMessage,
        });
        currentMessages.push(toolResultMessage);

        logger.info("Tool call executed and added to conversation", {
          threadId,
          toolName: toolCall.function.name,
          resultLength: toolResult.result?.length || 0,
          status: toolResult.status,
        });
      } catch (error) {
        logger.error("Tool execution failed", {
          threadId,
          toolCall: toolCall.function.name,
          error,
        });

        // Update tool call block with error
        await this.sequentialContentManager.updateToolCallBlock(
          assistantMessage.id,
          threadId,
          toolCallBlockId,
          {
            id: toolCall.id,
            name: toolCall.function.name,
            arguments: toolCall.function.arguments,
            status: "error",
            result: `Error: ${this.extractErrorMessage(error)}`,
          },
        );

        // Add error message to conversation
        const errorMessage: ChatMessage = {
          id: nanoid(),
          role: "tool",
          content: `Error executing ${toolCall.function.name}: ${this.extractErrorMessage(error)}`,
          tool_call_id: toolCall.id,
        };
        currentMessages.push(errorMessage);
      }
    }
  }

  /**
   * Extract a human-readable message from an unknown error value.
   * Falls back to a generic message for non-Error, non-string values.
   */
  private extractErrorMessage(error: unknown): string {
    if (typeof error === "string") {
      return error;
    }
    return error instanceof Error ? error.message : "Unknown error occurred";
  }

  /**
   * Handle tool call start event - update sequential content status
   */
  private async handleToolCallStart(
    threadId: string,
    messageId: string,
    toolCallId: string,
  ): Promise<void> {
    try {
      // Locate the sequential-content block that renders this tool call.
      const blockId = await this.findToolCallBlockId(messageId, toolCallId);
      if (!blockId) {
        logger.warn("Tool call block not found for start event", {
          threadId,
          messageId,
          toolCallId,
        });
        return;
      }

      // Flip the block to "pending" so the UI reflects that execution began.
      const startUpdate = {
        id: toolCallId,
        status: "pending",
      } as ToolCallResult;
      await this.sequentialContentManager.updateToolCallBlock(
        messageId,
        threadId,
        blockId,
        startUpdate,
      );
    } catch (error) {
      logger.error("Failed to handle tool call start event", {
        threadId,
        messageId,
        toolCallId,
        error: this.extractErrorMessage(error),
      });
    }
  }

  /**
   * Handle tool call success event - update sequential content with result
   */
  /**
   * Handle tool call success event - update sequential content with result
   *
   * Records the tool's result string and execution time on the matching
   * block and marks it "success". Missing blocks are logged and ignored;
   * failures never propagate to the caller.
   */
  private async handleToolCallSuccess(
    threadId: string,
    messageId: string,
    toolCallId: string,
    result: string,
    executionTime: number,
  ): Promise<void> {
    try {
      // Resolve the sequential-content block that tracks this tool call.
      const toolCallBlockId = await this.findToolCallBlockId(
        messageId,
        toolCallId,
      );

      if (!toolCallBlockId) {
        logger.warn("Tool call block not found for success event", {
          threadId,
          messageId,
          toolCallId,
        });
        return;
      }

      // Persist the successful outcome on the block.
      const successUpdate = {
        id: toolCallId,
        status: "success",
        result,
        executionTime,
      } as ToolCallResult;
      await this.sequentialContentManager.updateToolCallBlock(
        messageId,
        threadId,
        toolCallBlockId,
        successUpdate,
      );
    } catch (error) {
      logger.error("Failed to handle tool call success event", {
        threadId,
        messageId,
        toolCallId,
        error: this.extractErrorMessage(error),
      });
    }
  }

  /**
   * Handle tool call error event - update sequential content with error
   */
  /**
   * Handle tool call error event - update sequential content with error
   *
   * Marks the matching tool call block as "error" and stores the given
   * error text on it. Missing blocks are logged and ignored; failures
   * never propagate to the caller.
   */
  private async handleToolCallError(
    threadId: string,
    messageId: string,
    toolCallId: string,
    errorMessage: string,
  ): Promise<void> {
    try {
      // Resolve the sequential-content block that tracks this tool call.
      const toolCallBlockId = await this.findToolCallBlockId(
        messageId,
        toolCallId,
      );

      if (!toolCallBlockId) {
        logger.warn("Tool call block not found for error event", {
          threadId,
          messageId,
          toolCallId,
        });
        return;
      }

      // Persist the failure on the block.
      const errorUpdate = {
        id: toolCallId,
        status: "error",
        error: errorMessage,
      } as ToolCallResult;
      await this.sequentialContentManager.updateToolCallBlock(
        messageId,
        threadId,
        toolCallBlockId,
        errorUpdate,
      );
    } catch (error) {
      logger.error("Failed to handle tool call error event", {
        threadId,
        messageId,
        toolCallId,
        error: this.extractErrorMessage(error),
      });
    }
  }

  /**
   * Find the block ID for a tool call in sequential content
   */
  /**
   * Find the block ID for a tool call in sequential content
   *
   * Thin delegate to the sequential content manager; resolves to null
   * when no block tracks the given tool call.
   */
  private async findToolCallBlockId(
    messageId: string,
    toolCallId: string,
  ): Promise<string | null> {
    const blockId = await this.sequentialContentManager.findToolCallBlockId(
      messageId,
      toolCallId,
    );
    return blockId;
  }

  /**
   * Get thread ID for a message
   */
  /**
   * Get thread ID for a message
   *
   * Resolves to an empty string when the message is missing or the
   * lookup throws, so callers never have to handle a rejection.
   */
  private async getThreadIdForMessage(messageId: string): Promise<string> {
    try {
      // NOTE(review): getMessageById takes an IPC event as its first
      // argument; an empty object is cast to satisfy the signature since
      // no real event exists on this internal path — confirm the service
      // tolerates a stub event.
      const ipcEventStub = {} as Electron.IpcMainEvent;
      const message = await this.messageService.getMessageById(
        ipcEventStub,
        messageId,
      );
      return message?.threadId || "";
    } catch (error) {
      logger.error("Failed to get thread ID for message", {
        messageId,
        error: this.extractErrorMessage(error),
      });
      return "";
    }
  }

  /**
   * Find which MCP server has the specified tool
   */
  /**
   * Find which MCP server has the specified tool
   *
   * Scans the given server IDs in order and returns the first one whose
   * cached tool list contains the tool name. A server whose tool lookup
   * fails is logged and skipped; resolves to null when no server matches.
   */
  private async findServerForTool(
    toolName: string,
    serverIds: string[],
  ): Promise<string | null> {
    try {
      for (const candidateId of serverIds) {
        try {
          const serverTools =
            await this.mcpService.getToolsFromDatabase(candidateId);
          const matched = serverTools.find(
            (tool: { name: string }) => tool.name === toolName,
          );
          if (matched !== undefined) {
            return candidateId;
          }
        } catch (error) {
          // One failing server must not abort the scan of the rest.
          logger.warn("Failed to get tools from database for MCP server", {
            serverId: candidateId,
            toolName,
            error: this.extractErrorMessage(error),
          });
        }
      }
      return null;
    } catch (error) {
      logger.error("Failed to find server for tool", {
        toolName,
        serverIds,
        error: this.extractErrorMessage(error),
      });
      return null;
    }
  }

  /**
   * Check if a tool requires user approval based on MCP server settings
   */
  /**
   * Check if a tool requires user approval based on MCP server settings
   *
   * Approval is required only when the server explicitly sets
   * `autoUseTool` to false; a missing server, missing settings, or a
   * failed lookup (logged) all resolve to no approval needed.
   */
  private async checkToolRequiresApproval(
    mcpServerId: string,
  ): Promise<boolean> {
    try {
      const server =
        await this.mcpService.getMcpServerByIdInternal(mcpServerId);
      // Optional chaining collapses the missing-server / missing-settings
      // guards: anything other than an explicit `false` yields false here.
      return server?.advancedSettings?.autoUseTool === false;
    } catch (error) {
      logger.error("Failed to check tool approval requirement", {
        mcpServerId,
        error: this.extractErrorMessage(error),
      });
      return false;
    }
  }
}
