import json
from abc import ABC, abstractmethod
from collections.abc import Callable, Sequence
from typing import Any, TypeVar
from uuid import uuid4

from langchain_anthropic import ChatAnthropic
from langchain_core.language_models import BaseChatModel
from langchain_core.messages import (
    AIMessage as LangchainAIMessage,
    BaseMessage,
    HumanMessage as LangchainHumanMessage,
)
from langchain_core.tools import BaseTool
from langchain_core.utils.function_calling import convert_to_openai_tool
from pydantic import BaseModel

from posthog.schema import AgentMode, AssistantMessage, AssistantToolCallMessage, ContextMessage, HumanMessage

from posthog.sync import database_sync_to_async

from ee.hogai.context.prompts import CONTEXT_INITIAL_MODE_PROMPT
from ee.hogai.core.agent_modes.prompts import ROOT_AGENT_MODE_REMINDER_PROMPT
from ee.hogai.tools.switch_mode import SWITCH_MODE_TOOL_NAME
from ee.hogai.utils.helpers import find_start_message, find_start_message_idx, insert_messages_before_start
from ee.hogai.utils.types import AssistantMessageUnion

# Generic message type bound to the union of assistant message variants, so
# window helpers return sequences of the same concrete element type they get.
T = TypeVar("T", bound=AssistantMessageUnion)

# Shapes accepted by langchain's tool-binding APIs: JSON-schema dicts,
# pydantic model classes, plain callables, or BaseTool instances.
LangchainTools = Sequence[dict[str, Any] | type | Callable | BaseTool]


class InsertionResult(BaseModel):
    """Result of inserting a summary message into the conversation window."""

    # The full message sequence after the summary (and any mode reminder)
    # has been inserted.
    messages: Sequence[AssistantMessageUnion]
    # ID of the start message after insertion (may be a freshly-copied message).
    updated_start_id: str
    # ID of the message the new conversation window begins at.
    updated_window_start_id: str


class ConversationCompactionManager(ABC):
    """
    Manages conversation window boundaries, message filtering, and summarization decisions.

    The "window" is the suffix of the conversation still sent to the model
    verbatim; older messages get replaced by a summary (context) message.
    Subclasses implement `_get_token_count` to obtain an exact,
    provider-specific token count; all other sizing here uses a cheap
    characters-per-token estimate.
    """

    CONVERSATION_WINDOW_SIZE = 100_000
    """
    Determines the maximum number of tokens allowed in the conversation window.
    """
    APPROXIMATE_TOKEN_LENGTH = 4
    """
    Determines the approximate number of characters per token.
    """

    def find_window_boundary(self, messages: Sequence[T], max_messages: int = 10, max_tokens: int = 1000) -> str | None:
        """
        Find the optimal window start ID based on message count and token limits.
        Ensures the window starts at a human or assistant message.

        Iterates from the newest message backwards, spending the
        `max_messages` and `max_tokens` budgets, and returns the ID of the
        earliest human/assistant message that still fits both budgets.
        Returns None when even the most recent message exceeds a budget.
        """

        new_window_id: str | None = None
        for message in reversed(messages):
            # Handle limits before assigning the window ID, so a message that
            # blows the budget can never itself become the window start.
            max_tokens -= self._get_estimated_assistant_message_tokens(message)
            max_messages -= 1
            if max_tokens < 0 or max_messages < 0:
                break

            # Assign the new window ID. Only human/assistant messages that
            # carry an ID are eligible boundaries.
            if message.id is not None:
                if isinstance(message, HumanMessage):
                    new_window_id = message.id
                if isinstance(message, AssistantMessage):
                    new_window_id = message.id

        return new_window_id

    def get_messages_in_window(self, messages: Sequence[T], window_start_id: str | None = None) -> Sequence[T]:
        """
        Filter messages to only those within the conversation window.
        With no `window_start_id`, the full sequence is returned unchanged.
        """
        if window_start_id is not None:
            return self._get_conversation_window(messages, window_start_id)
        return messages

    async def should_compact_conversation(
        self, model: BaseChatModel, messages: list[BaseMessage], tools: LangchainTools | None = None, **kwargs
    ) -> bool:
        """
        Determine if the conversation should be summarized based on token count.

        Short conversations (two or fewer human messages) are sized with a
        cheap character-based estimate rather than a provider round-trip —
        see `calculate_token_count`.
        """
        return await self.calculate_token_count(model, messages, tools, **kwargs) > self.CONVERSATION_WINDOW_SIZE

    async def calculate_token_count(
        self, model: BaseChatModel, messages: list[BaseMessage], tools: LangchainTools | None = None, **kwargs
    ) -> int:
        """
        Calculate the token count for a conversation.

        Uses the characters/4 estimate when there are at most two human
        messages; otherwise defers to the provider-specific `_get_token_count`
        for an exact count.
        """
        # Skip the exact (expensive) count while the conversation is short.
        human_messages = [message for message in messages if isinstance(message, LangchainHumanMessage)]
        if len(human_messages) <= 2:
            tool_tokens = self._get_estimated_tools_tokens(tools) if tools else 0
            return sum(self._get_estimated_langchain_message_tokens(message) for message in messages) + tool_tokens
        return await self._get_token_count(model, messages, tools, **kwargs)

    def update_window(
        self,
        messages: Sequence[T],
        summary_message: ContextMessage,
        agent_mode: AgentMode,
        start_id: str | None = None,
        # TODO: Delete when modes are rolled out
        is_modes_feature_flag_enabled: bool = False,
    ) -> InsertionResult:
        """Finds the optimal position to insert the summary message in the conversation window.

        Dispatches to one of three handlers depending on whether a window
        boundary exists and where the start message falls relative to it.
        Raises ValueError when `start_id` matches no message in `messages`.
        """
        window_start_id_candidate = self.find_window_boundary(messages, max_messages=16, max_tokens=2048)
        start_message = find_start_message(messages, start_id)
        if not start_message:
            raise ValueError("Start message not found")

        # Copy the start message under a fresh ID so it can be re-inserted
        # without colliding with the original message's ID.
        start_message_copy = start_message.model_copy(deep=True)
        start_message_copy.id = str(uuid4())

        # The last messages were too large to fit into the window. Copy the last human message to the start of the window.
        if not window_start_id_candidate:
            return self._handle_no_window_boundary(
                messages, summary_message, start_message_copy, agent_mode, is_modes_feature_flag_enabled
            )

        # Find the updated window
        start_message_idx = find_start_message_idx(messages, window_start_id_candidate)
        new_window = messages[start_message_idx:]

        # If the start human message is in the window, insert the summary message before it
        # and update the window start.
        if start_id and next((m for m in new_window if m.id == start_id), None):
            return self._handle_start_in_window(
                messages,
                summary_message,
                start_id,
                window_start_id_candidate,
                agent_mode,
                is_modes_feature_flag_enabled,
            )

        # If the start message is not in the window, insert the summary message and human message at the start of the window.
        return self._handle_start_outside_window(
            new_window,
            summary_message,
            start_message_copy,
            window_start_id_candidate,
            agent_mode,
            is_modes_feature_flag_enabled,
        )

    def _handle_no_window_boundary(
        self,
        messages: Sequence[T],
        summary_message: ContextMessage,
        start_message_copy: HumanMessage,
        agent_mode: AgentMode,
        is_modes_feature_flag_enabled: bool,
    ) -> InsertionResult:
        """Handle case where no window boundary was found (messages too large).

        Appends the summary and the copied start message at the end, so the
        new window begins at the summary message.
        NOTE(review): unlike the sibling handlers, `summary_message.id` is
        used unguarded here — a summary without an ID would fail
        InsertionResult validation; confirm summaries always carry an ID.
        """
        result_messages = [*messages, summary_message, start_message_copy]
        # Check if we need to add a mode reminder
        if is_modes_feature_flag_enabled and (context_message := self._get_mode_message(result_messages, agent_mode)):
            # Insert mode reminder right after summary message
            result_messages = [*messages, summary_message, context_message, start_message_copy]
        return InsertionResult(
            messages=result_messages,
            updated_start_id=start_message_copy.id,
            updated_window_start_id=summary_message.id,
        )

    def _handle_start_in_window(
        self,
        messages: Sequence[T],
        summary_message: ContextMessage,
        start_id: str,
        window_start_id_candidate: str,
        agent_mode: AgentMode,
        is_modes_feature_flag_enabled: bool,
    ) -> InsertionResult:
        """Handle case where start message is within the window boundary.

        The summary goes immediately before the existing start message, and
        the window start moves to the boundary candidate; a mode reminder is
        inserted after the summary when needed.
        """
        updated_messages = insert_messages_before_start(messages, [summary_message], start_id=start_id)
        if summary_message.id:
            updated_messages = list(
                self._insert_mode_reminder_after_summary(
                    updated_messages, summary_message.id, agent_mode, is_modes_feature_flag_enabled
                )
            )
        return InsertionResult(
            messages=updated_messages,
            updated_start_id=start_id,
            updated_window_start_id=window_start_id_candidate,
        )

    def _handle_start_outside_window(
        self,
        new_window: Sequence[T],
        summary_message: ContextMessage,
        start_message_copy: HumanMessage,
        window_start_id_candidate: str,
        agent_mode: AgentMode,
        is_modes_feature_flag_enabled: bool,
    ) -> InsertionResult:
        """Handle case where start message is outside the window boundary.

        Both the summary and a fresh copy of the start message are inserted
        at the window boundary, and the copy becomes the new start message.
        """
        updated_messages = list(
            insert_messages_before_start(
                new_window, [summary_message, start_message_copy], start_id=window_start_id_candidate
            )
        )
        summary_id = summary_message.id
        if summary_id:
            updated_messages = list(
                self._insert_mode_reminder_after_summary(
                    updated_messages, summary_id, agent_mode, is_modes_feature_flag_enabled
                )
            )
        return InsertionResult(
            messages=updated_messages,
            updated_start_id=start_message_copy.id,
            updated_window_start_id=window_start_id_candidate,
        )

    def _insert_mode_reminder_after_summary(
        self,
        messages: Sequence[T],
        summary_id: str,
        agent_mode: AgentMode,
        is_modes_feature_flag_enabled: bool,
    ) -> Sequence[T]:
        """Insert mode reminder right after the summary message if needed.

        Returns the sequence unchanged when the flag is off or no reminder is
        required. Raises StopIteration if `summary_id` is not present —
        callers only pass IDs of messages they just inserted.
        """
        if not is_modes_feature_flag_enabled:
            return messages
        context_message = self._get_mode_message(messages, agent_mode)
        if not context_message:
            return messages
        summary_idx = next(i for i, msg in enumerate(messages) if msg.id == summary_id)
        return [
            *messages[: summary_idx + 1],
            context_message,
            *messages[summary_idx + 1 :],
        ]

    def _get_estimated_assistant_message_tokens(self, message: AssistantMessageUnion) -> int:
        """
        Estimate token count for a message using character/4 heuristic.

        Assistant messages also count the JSON-serialized arguments of any
        tool calls; message types other than the three handled here estimate
        to zero tokens.
        """
        char_count = 0
        if isinstance(message, HumanMessage):
            char_count = len(message.content)
        elif isinstance(message, AssistantMessage):
            char_count = len(message.content) + sum(
                len(json.dumps(m.args, separators=(",", ":"))) for m in message.tool_calls or []
            )
        elif isinstance(message, AssistantToolCallMessage):
            char_count = len(message.content)
        return round(char_count / self.APPROXIMATE_TOKEN_LENGTH)

    def _get_estimated_langchain_message_tokens(self, message: BaseMessage) -> int:
        """
        Estimate token count for a message using character/4 heuristic.

        Handles both plain-string and structured (list-of-parts) content;
        AI-message tool calls are counted via their compact JSON form.
        """
        char_count = 0
        if isinstance(message.content, str):
            char_count = len(message.content)
        else:
            for content in message.content:
                if isinstance(content, str):
                    char_count += len(content)
                elif isinstance(content, dict):
                    char_count += self._count_json_tokens(content)
        if isinstance(message, LangchainAIMessage) and message.tool_calls:
            for tool_call in message.tool_calls:
                char_count += len(json.dumps(tool_call, separators=(",", ":")))
        return round(char_count / self.APPROXIMATE_TOKEN_LENGTH)

    def _get_conversation_window(self, messages: Sequence[T], start_id: str) -> Sequence[T]:
        """
        Get messages from the start_id onwards.
        Falls back to the full sequence when `start_id` is not found.
        """
        for idx, message in enumerate(messages):
            if message.id == start_id:
                return messages[idx:]
        return messages

    def _get_estimated_tools_tokens(self, tools: LangchainTools) -> int:
        """
        Estimate token count for tools by converting them to JSON schemas.
        """
        if not tools:
            return 0

        total_chars = 0
        for tool in tools:
            tool_schema = convert_to_openai_tool(tool)
            total_chars += self._count_json_tokens(tool_schema)
        return round(total_chars / self.APPROXIMATE_TOKEN_LENGTH)

    def _count_json_tokens(self, json_data: dict) -> int:
        # NOTE(review): despite the name, this returns a CHARACTER count of
        # the compact JSON form — callers divide by APPROXIMATE_TOKEN_LENGTH.
        return len(json.dumps(json_data, separators=(",", ":")))

    @abstractmethod
    async def _get_token_count(
        self,
        model: Any,
        messages: list[BaseMessage],
        tools: LangchainTools | None = None,
        thinking_config: dict[str, Any] | None = None,
        **kwargs,
    ) -> int:
        """Return an exact, provider-specific token count for the conversation."""
        raise NotImplementedError

    def _get_mode_message(
        self, updated_messages: Sequence[AssistantMessageUnion], agent_mode: AgentMode
    ) -> ContextMessage | None:
        """Build a mode-reminder context message, or None when not needed."""
        if not self._should_add_mode_reminder(updated_messages):
            return None
        return ContextMessage(
            content=ROOT_AGENT_MODE_REMINDER_PROMPT.format(mode=agent_mode.value),
            id=str(uuid4()),
        )

    def _should_add_mode_reminder(self, messages: Sequence[AssistantMessageUnion]) -> bool:
        """
        Determine if a mode reminder should be added to the messages.
        Returns True only when:
        - no initial mode message from the context manager is present, and
        - the mode is not already evident (no switch_mode tool call in the window)
        """
        if self._has_initial_mode_message(messages):
            return False
        if self._is_mode_evident_in_window(messages):
            return False
        return True

    def _is_mode_evident_in_window(self, messages: Sequence[AssistantMessageUnion]) -> bool:
        """
        Check if the current agent mode is evident in the conversation window.
        Returns True if there's a switch_mode tool call in the messages.
        NOTE(review): ANY switch_mode call counts — the call's target mode is
        never compared against the current agent mode; confirm intended.
        """

        for message in messages:
            if isinstance(message, AssistantMessage) and message.tool_calls:
                for tool_call in message.tool_calls:
                    if tool_call.name == SWITCH_MODE_TOOL_NAME:
                        return True
        return False

    def _has_initial_mode_message(self, messages: Sequence[AssistantMessageUnion]) -> bool:
        """
        Check if the initial mode message from the context manager is present in the messages.
        """
        for message in messages:
            if isinstance(message, ContextMessage) and CONTEXT_INITIAL_MODE_PROMPT in message.content:
                return True
        return False

class AnthropicConversationCompactionManager(ConversationCompactionManager):
    """Compaction manager that delegates exact token counting to Anthropic."""

    async def _get_token_count(
        self,
        model: ChatAnthropic,
        messages: list[BaseMessage],
        tools: LangchainTools | None = None,
        thinking_config: dict[str, Any] | None = None,
        **kwargs,
    ) -> int:
        """Count tokens via `ChatAnthropic.get_num_tokens_from_messages`.

        The client call is synchronous, so it is wrapped to run off the event
        loop; `thread_sensitive=False` lets it execute on any worker thread.
        """
        count_tokens = database_sync_to_async(model.get_num_tokens_from_messages, thread_sensitive=False)
        return await count_tokens(messages, thinking=thinking_config, tools=tools)