import type { AttachmentFile } from "@renderer/hooks/use-attachments";
import { useModels, useProviders } from "@renderer/queries";
import {
  useNewChatModelId,
  useSelectedModelId,
} from "@renderer/queries/hooks/use-settings";
import { useTabs } from "@renderer/queries/hooks/use-tabs";
import { useThreads } from "@renderer/queries/hooks/use-threads";
import logger from "@shared/logger/renderer-logger";
import type {
  CreateThreadData,
  Model,
  Provider,
  Thread,
} from "@shared/triplit/types";
import { useEffect, useState } from "react";
import { useTranslation } from "react-i18next";
import { toast } from "sonner";
import { useActiveTab } from "./use-active-tab";
import { useActiveThread } from "./use-active-thread";
import { usePrivacyMode } from "./use-privacy-mode";

// Service clients exposed on window.service (Electron renderer bridge —
// presumably provided by the preload script; verify against preload setup).
const services = window.service;
const threadService = services.threadService;
const tabService = services.tabService;
const messageService = services.messageService;
const providerService = services.providerService;
const settingsService = services.settingsService;
const attachmentService = services.attachmentService;
const mcpService = services.mcpService;

/**
 * Toolbar state and actions for the chat view: model selection, thread
 * creation, message sending, and regeneration of assistant responses.
 *
 * Returns the effective selected model id plus handlers backed by the
 * window.service bridge.
 */
export function useToolBar() {
  const { t } = useTranslation();
  const { activeThreadId, setActiveThreadId } = useActiveThread();
  const { activeTab, activeTabId, setActiveTabId } = useActiveTab();
  const { privacyState } = usePrivacyMode();

  const { data: tabs = [] } = useTabs();
  const { data: threads = [] } = useThreads();

  // Query providers and models
  const { data: providers } = useProviders();
  const { data: models } = useModels();

  const { data: globalSelectedModelId } = useSelectedModelId();
  const { data: newChatModelId } = useNewChatModelId();

  // "New chat" = no active thread, or the active tab has no thread bound yet.
  const isNewChat = !activeThreadId || !activeTab?.threadId;

  // Per-tab, in-memory model choice for a chat that has not been persisted yet.
  const [currentNewChatModelId, setCurrentNewChatModelId] =
    useState<string>("");
  const [currentTabId, setCurrentTabId] = useState<string | null>(null);

  useEffect(() => {
    if (isNewChat) {
      const tabId = activeTab?.id || null;
      // Only re-seed the default when the active tab actually changed, so a
      // manual pick made in this tab is not clobbered on unrelated re-renders.
      if (tabId !== currentTabId) {
        // "use-last-model" is a sentinel meaning: fall back to the global selection.
        const defaultModelId =
          (newChatModelId || "") === "use-last-model"
            ? globalSelectedModelId || ""
            : newChatModelId || "";
        setCurrentNewChatModelId(defaultModelId);
        setCurrentTabId(tabId);
      }
    } else {
      // Clear the temporary new-chat state once a real thread is active.
      setCurrentNewChatModelId("");
      setCurrentTabId(null);
    }
  }, [
    isNewChat,
    activeTab?.id,
    newChatModelId,
    globalSelectedModelId,
    currentTabId,
  ]);

  // Decide which model id to expose depending on whether this is a new chat.
  const selectedModelId = isNewChat
    ? currentNewChatModelId || globalSelectedModelId || ""
    : globalSelectedModelId || "";

  /**
   * Record the user's model choice.
   *
   * New chats only remember the pick in memory for this session; existing
   * chats persist it globally and mirror it onto the active thread.
   */
  const handleModelSelect = async (modelId: string) => {
    if (isNewChat) {
      setCurrentNewChatModelId(modelId);
      return;
    }

    // Existing chat: persist the global selection first.
    await settingsService.updateSelectedModelId(modelId);

    if (!activeThreadId) {
      return;
    }

    try {
      const pickedModel = models?.find((m) => m.id === modelId);
      const pickedProviderId = pickedModel?.providerId;
      if (pickedProviderId) {
        await threadService.updateThreadModel(
          activeThreadId,
          modelId,
          pickedProviderId,
        );
      }
    } catch (error) {
      logger.error("update thread error", { error });
    }
  };

  /**
   * Create a new thread and make it active.
   *
   * @returns The created thread, or null on failure (an error toast is shown).
   */
  const createThread = async (
    threadData: CreateThreadData,
  ): Promise<Thread | null> => {
    try {
      // isPrivate always comes from the current privacy state, never from the caller.
      const payload: CreateThreadData = {
        title: threadData.title,
        modelId: threadData.modelId,
        providerId: threadData.providerId,
        isPrivate: privacyState.isPrivate,
      };

      const created = await threadService.createNewThread(payload);
      await setActiveThreadId(created.id);
      return created;
    } catch (error) {
      logger.error("create thread error", { error });
      toast.error(t("thread.create-thread-error"));
      return null;
    }
  };

  /**
   * Kick off a streaming chat completion through the provider service.
   * Logs and rethrows on failure so callers can surface the error.
   */
  const startStreamChat = async (
    tabId: string,
    threadId: string,
    userMessageId: string,
    messages: Array<{
      role: "user" | "assistant" | "system" | "function";
      content: string;
      id: string; // Include message ID for attachment lookup
    }>,
    provider: Provider,
    model: Model,
    regeneration: boolean,
  ) => {
    try {
      const result = await providerService.startStreamChat({
        tabId,
        threadId,
        userMessageId,
        messages,
        provider,
        model,
        regeneration,
      });

      // The service reports failure in-band; convert it to an exception.
      if (result.success) {
        return result;
      }
      throw new Error(result.error);
    } catch (error) {
      logger.error("Failed to start stream chat", { error });
      throw error;
    }
  };

  /**
   * Send a user message: create the thread/tab on demand for a new chat,
   * persist the message and its attachments, then start the streaming AI
   * response.
   *
   * @param content - Message text to send.
   * @param attachments - Optional files to attach to the new user message.
   * @param editMessageId - When set, re-sends from that message: it and every
   *   later message are deleted and replaced by the edited content.
   * @param tabId - Optional explicit target tab; defaults to the active tab.
   * @param threadId - Optional explicit target thread; defaults to the active thread.
   * @param selectedMcpServerIds - MCP server ids to enable for this message.
   * @throws When no model is selected, providers/models are not loaded yet,
   *   or persisting the message fails. Stream errors are handled via toast.
   */
  const handleSendMessage = async (
    content: string,
    attachments?: AttachmentFile[],
    editMessageId?: string,
    tabId?: string,
    threadId?: string,
    selectedMcpServerIds?: string[],
  ): Promise<void> => {
    // Persist any attachments against the freshly created user message.
    // (Was duplicated verbatim in the edit and normal-send paths.)
    const persistAttachments = async (messageId: string) => {
      if (!attachments || attachments.length === 0) {
        return;
      }
      const attachmentData = attachments.map((attachment) => ({
        messageId,
        name: attachment.name,
        size: attachment.size,
        type: attachment.type,
        filePath: attachment.filePath,
        preview: attachment.preview || null,
        fileData: attachment.fileData || null,
        fileContent: null,
      }));
      await attachmentService.insertAttachments(attachmentData);
    };

    try {
      let currentActiveThreadId: string | null = threadId || activeThreadId;
      let currentActiveTabId: string | null = tabId || activeTabId;

      if (!selectedModelId) {
        throw new Error("No model selected");
      }

      // Ensure providers and models are loaded before proceeding.
      // (Log no longer claims a retry — none is performed here.)
      if (!providers || providers.length === 0) {
        logger.warn("Providers not loaded yet");
        throw new Error("Providers not loaded yet, please try again");
      }

      if (!models || models.length === 0) {
        logger.warn("Models not loaded yet");
        throw new Error("Models not loaded yet, please try again");
      }

      // Find the selected model and its provider.
      const selectedModel = models.find(
        (model) => model.id === selectedModelId,
      );
      if (!selectedModel) {
        logger.error("Selected model not found", {
          selectedModelId,
          availableModels: models.map((m) => ({
            id: m.id,
            name: m.name,
            providerId: m.providerId,
          })),
        });
        throw new Error("Selected model not found");
      }

      const provider = providers.find(
        (p) => p.id === selectedModel.providerId,
      );
      if (!provider) {
        logger.error("Provider not found for selected model", {
          selectedModelId,
          selectedModelProviderId: selectedModel.providerId,
          availableProviders: providers.map((p) => ({
            id: p.id,
            name: p.name,
            enabled: p.enabled,
          })),
          providersCount: providers.length,
        });
        throw new Error(
          `Provider ${selectedModel.providerId} not found for selected model. Available providers: ${providers.map((p) => p.name).join(", ") || "none"}`,
        );
      }

      // Create a thread (and possibly a tab) when sending into a new chat.
      const needCreateTab = tabs?.length === 0;
      const needCreateThread = needCreateTab || !activeTab?.threadId;
      if (needCreateThread) {
        const thread = await createThread({
          title:
            (privacyState.isPrivate
              ? t("thread.private-thread-title")
              : content) || " ",
          modelId: selectedModelId,
          providerId: provider.id,
          isPrivate: privacyState.isPrivate,
        });

        if (thread) {
          const { id, title } = thread;
          if (activeTab) {
            // Reuse the current (thread-less) tab for the new thread.
            await tabService.updateTab(activeTab.id, {
              title: privacyState.isPrivate
                ? t("thread.private-thread-title")
                : title,
              threadId: id,
            });
          } else {
            // No tab at all yet: open one for the new thread.
            const newTab = await tabService.insertTab({
              title: privacyState.isPrivate
                ? t("thread.private-thread-title")
                : title,
              threadId: id,
              type: "thread",
              isPrivate: privacyState.isPrivate,
            });
            await setActiveTabId(newTab.id);
            currentActiveTabId = newTab.id;
          }

          currentActiveThreadId = thread.id;
          logger.debug("current active thread id", { currentActiveThreadId });
        }
      }

      if (!currentActiveThreadId || !currentActiveTabId) {
        throw new Error("No active thread or tab available");
      }

      // Get existing messages for context
      const existingMessages = await messageService.getMessagesByThreadId(
        currentActiveThreadId,
      );

      // Edit mode: replace the edited message and everything after it.
      // FIX: was `editMessageId !== ""`, which wrongly entered this branch
      // (and then threw "Message to edit not found") when the optional
      // editMessageId was omitted/undefined.
      if (editMessageId) {
        const messageToEdit = existingMessages.find(
          (m) => m.id === editMessageId,
        );
        if (!messageToEdit) {
          throw new Error("Message to edit not found");
        }
        const messageIndex = existingMessages.findIndex(
          (m) => m.id === editMessageId,
        );
        // Conversation context is everything before the edited message.
        const context = existingMessages.slice(0, messageIndex);

        // Delete the edited message and all later messages.
        const messagesToDelete = existingMessages.slice(messageIndex);
        for (const msg of messagesToDelete) {
          await messageService.deleteMessage(msg.id, msg.threadId);
        }

        try {
          // Re-send the edited content at the original sequence position.
          const userMessage = await messageService.sendUserMessage({
            threadId: currentActiveThreadId,
            content,
            orderSeq: messageToEdit.orderSeq,
            modelId: selectedModelId,
            modelName: selectedModel.name,
            providerId: provider.id,
            selectedMcpServerIds,
          });

          await persistAttachments(userMessage.id);

          const updatedConversationMessages = [
            ...context.map((msg) => ({
              role: msg.role as "user" | "assistant" | "system" | "function",
              content: msg.content,
              id: msg.id, // Include message ID for attachment lookup
            })),
            {
              role: "user" as const,
              content,
              id: userMessage.id, // Include the new message ID for attachment lookup
            },
          ];

          await startStreamChat(
            currentActiveTabId,
            currentActiveThreadId,
            userMessage.id,
            updatedConversationMessages,
            provider,
            selectedModel,
            false,
          );
        } catch (streamError) {
          logger.error("Failed to start streaming chat", { streamError });
          toast.error(t("thread.failed-to-generate-ai-response"));
          // Error handling is now done in the streaming hook
        }
        return;
      }

      // Normal send: append after the existing messages.
      const nextOrderSeq = existingMessages.length + 1;

      // Insert user message
      const userMessage = await messageService.sendUserMessage({
        threadId: currentActiveThreadId,
        content,
        orderSeq: nextOrderSeq,
        modelId: selectedModelId,
        modelName: selectedModel.name,
        providerId: provider.id,
        selectedMcpServerIds,
      });

      await persistAttachments(userMessage.id);

      logger.info("User message sent successfully", { userMessage });

      const conversationMessages = [
        ...existingMessages.map((msg) => ({
          role: msg.role as "user" | "assistant" | "system" | "function",
          content: msg.content,
          id: msg.id,
        })),
        {
          role: "user" as const,
          content,
          id: userMessage.id,
        },
      ];

      // Start streaming chat
      try {
        await startStreamChat(
          currentActiveTabId,
          currentActiveThreadId,
          userMessage.id,
          conversationMessages,
          provider,
          selectedModel,
          false,
        );
      } catch (streamError) {
        logger.error("Failed to start streaming chat", {
          streamError,
          streamErrorMessage:
            streamError instanceof Error
              ? streamError.message
              : "Unknown error",
          activeThreadId: currentActiveThreadId,
          activeTabId: currentActiveTabId,
          selectedModelId,
          providerId: provider.id,
        });

        // Show more specific error message
        const errorMessage =
          streamError instanceof Error && streamError.message
            ? `${t("thread.failed-to-generate-ai-response")}: ${streamError.message}`
            : t("thread.failed-to-generate-ai-response");

        toast.error(errorMessage);
        // Error handling is now done in the streaming hook
      }
    } catch (error) {
      logger.error("Failed to send message", { error });
      throw error;
    }
  };

  // Effect: Sync model selection with active thread
  useEffect(() => {
    // When the active thread (or thread list) changes, push that thread's
    // model onto the global selectedModelId setting so the toolbar shows it.
    const syncSelectedModelId = async () => {
      if (activeThreadId) {
        const activeThread = threads.find(
          (thread) => thread.id === activeThreadId,
        );
        if (activeThread) {
          await settingsService.updateSelectedModelId(activeThread.modelId);
        }
      }
    };

    // NOTE(review): fire-and-forget — a rejected promise is unhandled here;
    // consider `void syncSelectedModelId().catch(...)`.
    syncSelectedModelId();
  }, [activeThreadId, threads]);

  /**
   * Regenerate the AI response starting at the given message: deletes that
   * message and everything after it, re-caches the thread's MCP context via a
   * temporary user message, then restarts streaming with the prior context.
   *
   * @param messageId - Id of the message to regenerate from.
   */
  const handleRefreshMessage = async (messageId: string) => {
    // Find the selected model and its provider
    const selectedModel = models?.find((model) => model.id === selectedModelId);
    if (!selectedModel) {
      toast.error(`${t("thread.selected-model-not-found")}`);
      return;
    }

    const provider = providers?.find((p) => p.id === selectedModel.providerId);
    if (!provider) {
      toast.error(`${t("thread.provider-not-found-for-selected-model")}`);
      return;
    }

    if (!activeThreadId) {
      return;
    }

    const existingMessages =
      await messageService.getMessagesByThreadId(activeThreadId);
    const messageToRefresh = existingMessages.find((m) => m.id === messageId);
    logger.debug("messageToRefresh", { messageId });
    if (!messageToRefresh) {
      toast.error(`${t("thread.message-not-found")}`);
      return;
    }

    const messageIndex = existingMessages.findIndex(
      (m) => m.id === messageToRefresh.id,
    );
    // Chronological context: everything before the message being refreshed.
    const context = existingMessages.slice(0, messageIndex);

    // Retrieve MCP server IDs for this thread to maintain context
    let selectedMcpServerIds: string[] = [];
    try {
      const threadMcpServers =
        await mcpService.getThreadMcpServersByThreadId(activeThreadId);
      selectedMcpServerIds = threadMcpServers
        .filter((tms) => tms.enabled)
        .map((tms) => tms.mcpServerId);

      logger.info("Retrieved MCP server IDs for regeneration", {
        threadId: activeThreadId,
        selectedMcpServerIds,
        totalServers: threadMcpServers.length,
      });
    } catch (error) {
      logger.warn("Failed to retrieve MCP server IDs for regeneration", {
        threadId: activeThreadId,
        error,
      });
      // Continue without MCP servers if retrieval fails
    }

    // Delete all messages after the message to refresh (including the message itself)
    const messagesToDelete = existingMessages.slice(messageIndex);
    for (const msg of messagesToDelete) {
      try {
        await messageService.deleteMessage(msg.id, msg.threadId);
      } catch (error) {
        logger.error("Failed to delete message", { msgId: msg.id, error });
      }
    }

    // Rebuild the conversation history (in chronological order) for the AI.
    const conversationMessages = [
      ...context.map((msg) => ({
        role: msg.role as "user" | "assistant" | "system" | "function",
        content: msg.content,
        id: msg.id, // Include message ID for attachment lookup
      })),
    ];

    // Find the last user message before the message to refresh.
    // FIX: search a copy — the original `context.reverse()` mutated `context`
    // in place, leaving the local array in reverse chronological order.
    const lastUserMessage = [...context]
      .reverse()
      .find((msg) => msg.role === "user");
    const userMessageId = lastUserMessage?.id || "";

    // Emit MESSAGE_SEND_FROM_USER event to cache MCP server context
    // This ensures the backend has the proper MCP context for streaming
    if (selectedMcpServerIds.length > 0) {
      logger.info("Preparing MCP context for regeneration", {
        threadId: activeThreadId,
        selectedMcpServerIds,
      });

      // Create a minimal temporary user message just to trigger the MCP context caching
      // We'll use the last user message content and sequence to avoid disrupting the flow
      try {
        const tempUserMessage = await messageService.sendUserMessage({
          threadId: activeThreadId,
          content: lastUserMessage?.content || "regeneration context",
          orderSeq: lastUserMessage?.orderSeq || 0,
          modelId: selectedModelId,
          modelName: selectedModel.name,
          providerId: provider.id,
          selectedMcpServerIds,
        });

        // Immediately delete the temporary message since we don't need it
        await messageService.deleteMessage(tempUserMessage.id, activeThreadId);

        logger.info("Successfully cached MCP context for regeneration", {
          threadId: activeThreadId,
          selectedMcpServerIds,
        });
      } catch (error) {
        logger.warn("Failed to cache MCP context for regeneration", {
          threadId: activeThreadId,
          error,
        });
        // Continue anyway, the regeneration may work without proper streaming
      }
    }

    // Use regular stream chat since we deleted the original message
    try {
      const data = await startStreamChat(
        activeTab?.id || "", // tabId
        activeThreadId, // threadId
        userMessageId, // userMessageId - the last user message that triggered this response
        conversationMessages,
        provider,
        selectedModel,
        true, // regeneration
      );
      logger.debug("Regenerate data", { data });
    } catch (streamError) {
      logger.error("Failed to regenerate streaming chat", { streamError });
      toast.error(t("thread.failed-to-generate-ai-response"));
    }
  };

  // Public API of the hook.
  return {
    selectedModelId,
    handleModelSelect,
    handleSendMessage,
    createThread,
    handleRefreshMessage,
  };
}
