from __future__ import annotations

import json
import time
import hashlib
from dataclasses import dataclass, field, fields
from typing import List, Optional, Annotated, Any, Union, Dict
from enum import Enum

from langchain_core.messages import AnyMessage, BaseMessage
from langgraph.graph import add_messages
from langchain_core.runnables import RunnableConfig, ensure_config, AddableDict

from ..config.agents import AGENT_FINAL_ANSWER_STR, get_action_final_answer

def add_force_goto(left: GraphNodeParam, right: GraphNodeParam) -> GraphNodeParam:
    """Reducer for the force_goto channel: latest meaningful value wins.

    The right (newer) value replaces the left only when it is present and
    carries a non-empty ``type``; otherwise the previous value is kept.
    """
    if right and right.type:
        return right
    return left


def add_usages(left: List[Usage], right: List[Usage]) -> List[Usage]:
    """Reducer that merges two usage lists, accumulating counters per model.

    When both sides are structurally identical, the left list is returned
    unchanged — this guards against double-counting when the same update
    is delivered on both branches.
    """
    if not left:
        return right or []
    if not right:
        return left

    def _canonical(usages: List[Usage]) -> list:
        # Stable, comparable representation: usage dicts sorted by model name.
        return sorted((usage.to_dict() for usage in usages), key=lambda d: d['model'])

    # Direct structural comparison replaces the original MD5-of-str digest:
    # identical semantics, no hashing cost, no collision risk.
    if _canonical(left) == _canonical(right):
        return left

    merged = {}
    for usage in left + right:
        existing = merged.get(usage.model)
        if existing is None:
            # Copy into a fresh Usage so the reducer never mutates the
            # incoming state objects.
            merged[usage.model] = Usage(
                model=usage.model,
                input_tokens=usage.input_tokens,
                output_tokens=usage.output_tokens,
                total_tokens=usage.total_tokens,
                input_cache_tokens=usage.input_cache_tokens,
                generated_images=usage.generated_images
            )
        else:
            existing.add_tokens(usage.input_tokens, usage.output_tokens, usage.input_cache_tokens)
            existing.add_images(usage.generated_images)

    return list(merged.values())


@dataclass(kw_only=True)
class GraphNodeParam:
    type: str = None
    params: dict = field(default_factory=dict)
    kwargs: dict = field(default_factory=dict)

    def add_param(self, key: str, value: Any):
        self.params[key] = value

    def get_param(self, key: str, default: Any = None):
        return self.params[key] if self.params[key] else default

    def __str__(self) -> str:
        return json.dumps(self.to_dict(), ensure_ascii=False)

    def to_dict(self) -> dict:
        return {
            "type": self.type,
            "params": self.params,
            "kwargs": self.kwargs
        }

    def __json__(self) -> dict:
        return self.to_dict()


@dataclass(kw_only=True)
class Usage:
    model: str
    input_tokens: int = 0
    output_tokens: int = 0
    total_tokens: int = 0
    input_cache_tokens: int = 0
    generated_images: int = 0

    def add_tokens(self, input_tokens: int, output_tokens: int, input_cache_tokens: int = 0):
        input_tokens = int(input_tokens or 0)
        output_tokens = int(output_tokens or 0)
        input_cache_tokens = int(input_cache_tokens or 0)

        self.input_tokens += input_tokens
        self.output_tokens += output_tokens
        self.input_cache_tokens += input_cache_tokens
        self.total_tokens = self.input_tokens + self.output_tokens

    def add_images(self, image_count: int):
        image_count = int(image_count or 0)
        self.generated_images += image_count

    def to_dict(self) -> dict:
        return {
            "model": self.model,
            "input_tokens": self.input_tokens,
            "output_tokens": self.output_tokens,
            "total_tokens": self.total_tokens,
            "input_cache_tokens": self.input_cache_tokens,
            "generated_images": self.generated_images
        }

    def __json__(self) -> dict:
        return self.to_dict()


@dataclass(kw_only=True)
class GraphState:
    """Shared LangGraph execution state.

    ``Annotated`` fields declare their LangGraph reducers (``add_messages``,
    ``add_force_goto``, ``add_usages``); the remaining fields are plain
    last-write-wins channels.
    """
    messages: Annotated[List[BaseMessage], add_messages] = field(default_factory=list)
    loop_nodes: List[GraphNodeParam] = field(default_factory=list)
    goto: GraphNodeParam = field(default_factory=GraphNodeParam)
    force_goto: Annotated[GraphNodeParam, add_force_goto] = field(default_factory=GraphNodeParam)
    loop_retry: int = 0
    recursion_steps: int = 0
    total_time: int = 0
    usages: Annotated[List[Usage], add_usages] = field(default_factory=list)
    prefix_executed: bool = False
    is_task_done: bool = False
    template_type: Optional[str] = None
    ext: dict = field(default_factory=dict)
    # Epoch milliseconds at state creation.
    start_time: int = field(default_factory=lambda: int(time.time() * 1000))

    def _find_usage(self, model: str) -> Optional[Usage]:
        """Return the tracked Usage entry for ``model``, or None if absent."""
        for usage in self.usages:
            if usage.model == model:
                return usage
        return None

    def add_usage(self, model: str, input_tokens: int, output_tokens: int, input_cache_tokens: int = 0,
                  generated_images: int = 0):
        """Accumulate token (and optional image) usage for ``model``.

        Creates a new Usage entry on first sight of the model. Best-effort:
        failures are printed and swallowed so usage accounting never
        interrupts graph execution.
        """
        try:
            input_tokens = int(input_tokens or 0)
            output_tokens = int(output_tokens or 0)
            input_cache_tokens = int(input_cache_tokens or 0)
            generated_images = int(generated_images or 0)

            existing = self._find_usage(model)
            if existing is not None:
                existing.add_tokens(input_tokens, output_tokens, input_cache_tokens)
                if generated_images > 0:
                    existing.add_images(generated_images)
                return

            self.usages.append(Usage(
                model=model,
                input_tokens=input_tokens,
                output_tokens=output_tokens,
                total_tokens=input_tokens + output_tokens,
                input_cache_tokens=input_cache_tokens,
                generated_images=generated_images
            ))
        except Exception as e:
            # Deliberate broad catch: usage bookkeeping is non-critical.
            print(f"Error adding usage: {e}")

    def add_usage_meta(self, model: str, usage_metadata: Dict[str, Any]):
        """Add usage from a provider ``usage_metadata`` dict.

        Reads ``input_tokens``/``output_tokens`` first, then falls back to
        ``prompt_tokens``/``completion_tokens``, and finally derives input
        from ``total_tokens - output_tokens``. Cache tokens come from
        ``input_token_details.cache_read`` or
        ``prompt_tokens_details.cached_tokens``.
        """
        try:
            input_tokens = usage_metadata.get('input_tokens', 0) or 0
            output_tokens = usage_metadata.get('output_tokens', 0) or 0

            # Fallback key names used by some providers.
            if input_tokens == 0:
                input_tokens = usage_metadata.get('prompt_tokens', 0) or 0
            if output_tokens == 0:
                output_tokens = usage_metadata.get('completion_tokens', 0) or 0

            # Last resort: derive the input count from the total.
            if input_tokens == 0 and 'total_tokens' in usage_metadata and output_tokens > 0:
                total_tokens = usage_metadata.get('total_tokens', 0) or 0
                input_tokens = total_tokens - output_tokens

            input_cache_tokens = 0
            input_token_details = usage_metadata.get('input_token_details', {})
            if isinstance(input_token_details, dict):
                input_cache_tokens = input_token_details.get('cache_read', 0) or 0

            if input_cache_tokens == 0:
                prompt_tokens_details = usage_metadata.get('prompt_tokens_details', {})
                if isinstance(prompt_tokens_details, dict):
                    input_cache_tokens = prompt_tokens_details.get('cached_tokens', 0) or 0

            generated_images = usage_metadata.get('generated_images', 0) or 0

            self.add_usage(model, input_tokens, output_tokens, input_cache_tokens, generated_images)
        except Exception as e:
            print(f"Error adding usage metadata: {e}")

    def add_image_usage(self, model: str, generated_images: int):
        """Accumulate image-generation count for ``model`` (token counts zero)."""
        try:
            generated_images = int(generated_images or 0)

            existing = self._find_usage(model)
            if existing is not None:
                existing.add_images(generated_images)
                return

            self.usages.append(Usage(
                model=model,
                input_tokens=0,
                output_tokens=0,
                total_tokens=0,
                input_cache_tokens=0,
                generated_images=generated_images
            ))
        except Exception as e:
            print(f"Error adding image usage: {e}")

    def get_usages(self):
        """Return usages as plain dicts, consistent with ``Usage.to_dict``."""
        if not self.usages:
            return []
        return [usage.to_dict() for usage in self.usages]


@dataclass(kw_only=True)
class GraphRequest:
    question: str = ""
    image: str | List = ""
    thread_id: str = field()
    history: List[AnyMessage] = field(default_factory=list)
    kwargs: dict = field(default_factory=dict)
    context: str = ""
    mcp_tools: str = ""
    web_search: bool = True
    agent_name: str = None
    verbose: bool = False

    @classmethod
    def from_runnable_config(
        cls, config: Optional[RunnableConfig] = None
    ) -> GraphRequest:
        """Load configuration w/ defaults for the given invocation."""
        config = ensure_config(config)
        configurable = config.get("configurable") or {}
        _fields = {f.name for f in fields(cls) if f.init}
        return cls(**{k: v for k, v in configurable.items() if k in _fields})


class AgentStepLogType(Enum):
    """Categories of agent step logs (used as the AgentStepLog.type value)."""
    ANSWER = "answer"      # answer text chunk (see AgentStepLog.build_answer)
    TOOL = "tool"          # tool invocation step
    SUBAGENT = "subagent"  # delegated sub-agent step
    LOG = "log"            # informational log step
    USAGE = "usage"        # token/image usage report step


@dataclass
class AgentStepLog:
    """One streamed step log produced while the agent runs.

    ``output`` holds the step payload, ``type`` one of the AgentStepLogType
    values, and ``meta`` carries flags such as ``finish``, ``action`` and
    ``usages``.
    """
    output: Union[str, List[Dict[str, Any]], GraphNodeParam] = None
    meta: Dict[str, Any] = field(default_factory=dict)
    type: str = AgentStepLogType.ANSWER.value
    response_id: str = None
    response_content: str = None

    @classmethod
    def build_answer(cls, chunk: Union[AddableDict, Dict, str], meta: Dict[str, Any] = None, finish: bool = False,
                     response_id: str = None, response_content: str = None) -> "AgentStepLog":
        """Build an ANSWER step from a raw chunk.

        Accepts a dict-like chunk carrying an "output" key, or a plain
        string; any other chunk shape yields None (original behavior kept).
        Fix: ``meta = None`` replaces the original mutable ``{}`` default
        argument; ``meta or {}`` keeps observable behavior identical.
        """
        chunk = "" if not chunk else chunk
        meta = meta or {}
        if "finish" not in meta:
            meta["finish"] = finish
        if isinstance(chunk, (AddableDict, Dict)) and "output" in chunk:
            output = chunk["output"]
            # Strip down to the final answer when the marker is present.
            if AGENT_FINAL_ANSWER_STR in output:
                output = get_action_final_answer(output)
            return cls(output=output, meta=meta, type=AgentStepLogType.ANSWER.value, response_id=response_id,
                       response_content=response_content)
        elif isinstance(chunk, str):
            return cls(output=chunk, meta=meta, type=AgentStepLogType.ANSWER.value, response_id=response_id,
                       response_content=response_content)
        # Unsupported chunk shape: made the original implicit fall-through explicit.
        return None

    @classmethod
    def _build_action_base(cls, action: str, output: Any, step_type: AgentStepLogType, meta: Dict[str, Any] = None,
                           finish: bool = False, response_id: str = None,
                           response_content: str = None) -> "AgentStepLog":
        """Shared constructor for action-style steps (tool/subagent/log/usage)."""
        meta = meta or {}
        meta["action"] = action
        if "finish" not in meta:
            meta["finish"] = finish
        return cls(output=output, type=step_type.value, meta=meta, response_id=response_id,
                   response_content=response_content)

    @classmethod
    def build_tool_action(cls, action: str, output: Any = "", meta: Dict[str, Any] = None, finish: bool = False,
                          response_id: str = None, response_content: str = None) -> "AgentStepLog":
        """Build a TOOL step for tool invocation ``action``."""
        return cls._build_action_base(action, output, AgentStepLogType.TOOL, meta, finish, response_id,
                                      response_content=response_content)

    @classmethod
    def build_subagent_action(cls, action: str, output: Any = "", meta: Dict[str, Any] = None, finish: bool = False,
                              response_id: str = None, response_content: str = None) -> "AgentStepLog":
        """Build a SUBAGENT step for delegated action ``action``."""
        return cls._build_action_base(action, output, AgentStepLogType.SUBAGENT, meta, finish, response_id,
                                      response_content=response_content)

    @classmethod
    def build_log_action(cls, action: str, output: Any = "", meta: Dict[str, Any] = None, finish: bool = False,
                         response_id: str = None, response_content: str = None) -> "AgentStepLog":
        """Build a LOG step.

        ``response_content`` (default None) was added for consistency with
        the sibling builders; omitting it reproduces the original behavior.
        """
        return cls._build_action_base(action, output, AgentStepLogType.LOG, meta, finish, response_id,
                                      response_content=response_content)

    @classmethod
    def build_usage_action(cls, usage: List[Dict] = None, meta: Dict[str, Any] = None, finish: bool = False,
                           response_id: str = None, response_content: str = None) -> "AgentStepLog":
        """Build a USAGE step carrying usage dicts in ``meta["usages"]``.

        Fix: ``usage = None`` replaces the original shared mutable ``[]``
        default; ``usage or []`` yields a fresh list per call.
        """
        meta = meta or {}
        meta["usages"] = usage or []
        return cls._build_action_base("", "", AgentStepLogType.USAGE, meta, finish, response_id, response_content)

    def __str__(self):
        """JSON-serialize the step, expanding a GraphNodeParam output to a dict."""
        output_dict = self.__dict__.copy()
        if isinstance(output_dict["output"], GraphNodeParam):
            output_dict["output"] = output_dict["output"].to_dict()
        return json.dumps(output_dict, indent=2, ensure_ascii=False)
