from __future__ import annotations

from abc import ABC, abstractmethod
from enum import Enum
import os
from dotenv import load_dotenv
from dataclasses import dataclass, field
from typing import (
    Any,
    Literal,
    TypedDict,
    TypeVar,
    Callable,
)
from .utils import EmbeddingFunc
from .types import KnowledgeGraph

# Load the .env file located in the current working directory, which lets each
# LightRAG instance carry its own configuration file.
# override=False: variables already set in the OS environment take precedence
# over values read from .env.
load_dotenv(dotenv_path=".env", override=False)


class TextChunkSchema(TypedDict):
    """Shape of a single text chunk produced by document splitting."""

    tokens: int  # number of tokens in this chunk
    content: str  # the chunk's text content
    full_doc_id: str  # ID of the source document this chunk belongs to
    chunk_order_index: int  # position of this chunk within the source document


T = TypeVar("T")  # generic type variable for annotations in this module


@dataclass
class QueryParam:
    """LightRAG查询执行配置参数"""

    mode: Literal["local", "global", "hybrid", "naive", "mix", "bypass"] = "global"
    """指定检索模式：
    - "local": 关注上下文相关信息
    - "global": 使用全局知识
    - "hybrid": 结合本地和全局检索
    - "naive": 基础搜索（无高级技术）
    - "mix": 整合知识图谱和向量检索
    """

    only_need_context: bool = False
    """若为True，仅返回检索到的上下文而不生成响应"""

    only_need_prompt: bool = False
    """若为True，仅生成提示词不产生最终响应"""

    response_type: str = "Multiple Paragraphs"
    """定义响应格式，例如：'多段落'、'单段落'、'要点列表'"""

    stream: bool = False
    """是否启用流式输出"""

    top_k: int = int(os.getenv("TOP_K", "60"))
    """检索结果数量，在'local'模式代表实体数量，'global'模式代表关系数量"""

    max_token_for_text_unit: int = int(os.getenv("MAX_TOKEN_TEXT_CHUNK", "4000"))
    """每个文本分块的最大token限制"""

    max_token_for_global_context: int = int(os.getenv("MAX_TOKEN_RELATION_DESC", "4000"))
    """全局检索中关系描述的最大token数"""

    max_token_for_local_context: int = int(os.getenv("MAX_TOKEN_ENTITY_DESC", "4000"))
    """本地检索中实体描述的最大token数"""

    hl_keywords: list[str] = field(default_factory=list)
    """高级关键词列表（检索优先级高）"""

    ll_keywords: list[str] = field(default_factory=list)
    """低级关键词列表（用于细化检索焦点）"""

    conversation_history: list[dict[str, str]] = field(default_factory=list)
    """历史对话记录，格式：[{"role": "用户/助手", "content": "消息"}]"""

    history_turns: int = 3
    """响应中考虑的历史对话轮数"""

    ids: list[str] | None = None
    """结果过滤ID列表"""

    model_func: Callable[..., object] | None = None
    """可覆盖使用的LLM模型函数，允许不同查询模式使用不同模型"""


@dataclass
class StorageNameSpace(ABC):
    """Base class shared by all storage backends: a named storage area plus
    its lifecycle hooks (initialize / finalize / persist / drop)."""

    namespace: str  # logical name of this storage area
    global_config: dict[str, Any]  # global configuration dictionary passed to the storage

    async def initialize(self):
        """Initialize the storage."""
        pass

    async def finalize(self):
        """Finalize the storage operations."""
        pass

    @abstractmethod
    async def index_done_callback(self) -> None:
        """Commit the storage operations after indexing."""

    @abstractmethod
    async def drop(self) -> dict[str, str]:
        """Drop all data from storage and clean up resources.

            This abstract method defines the contract for dropping all data.
            Each storage type must implement it in order to:
            1. Clear all data from memory and/or external storage
            2. Remove any related storage files, if applicable
            3. Reset the storage back to its initial state
            4. Handle cleanup of any resources
            5. Notify other processes, if needed
            6. Persist the data to disk immediately as part of this operation.

            Returns:
                dict[str, str]: Operation status and message, in the format:
                    {
                        "status": str,  # "success" or "error"
                        "message": str  # "data dropped" on success, error details on failure
                    }

            Implementation specific:
            - On success: return {"status": "success", "message": "data dropped"}
            - On failure: return {"status": "error", "message": "<error details>"}
            - If unsupported: return {"status": "error", "message": "unsupported"}

        """



@dataclass
class BaseVectorStorage(StorageNameSpace, ABC):
    """Abstract interface for vector storage backends."""

    embedding_func: EmbeddingFunc  # function used to embed content into vectors
    cosine_better_than_threshold: float = field(default=0.2)  # cosine-similarity cutoff for results — presumably results below it are discarded; confirm in implementations
    meta_fields: set[str] = field(default_factory=set)  # names of extra metadata fields stored alongside vectors

    @abstractmethod
    async def query(
        self, query: str, top_k: int, ids: list[str] | None = None
    ) -> list[dict[str, Any]]:
        """Query the vector storage and retrieve the top_k results."""

    @abstractmethod
    async def upsert(self, data: dict[str, dict[str, Any]]) -> None:
        """Insert or update vectors in the storage.

        Importance notes for in-memory storage:
        1. Changes will be persisted to disk during the next index_done_callback
        2. Only one process should update the storage at a time before
           index_done_callback; a KG-storage-log should be used to avoid
           data corruption
        """

    @abstractmethod
    async def delete_entity(self, entity_name: str) -> None:
        """Delete a single entity by its name.

        Importance notes for in-memory storage:
        1. Changes will be persisted to disk during the next index_done_callback
        2. Only one process should update the storage at a time before
           index_done_callback; a KG-storage-log should be used to avoid
           data corruption
        """

    @abstractmethod
    async def delete_entity_relation(self, entity_name: str) -> None:
        """Delete relations for a given entity.

        Importance notes for in-memory storage:
        1. Changes will be persisted to disk during the next index_done_callback
        2. Only one process should update the storage at a time before
           index_done_callback; a KG-storage-log should be used to avoid
           data corruption
        """

    @abstractmethod
    async def get_by_id(self, id: str) -> dict[str, Any] | None:
        """Get vector data by its ID

        Args:
            id: The unique identifier of the vector

        Returns:
            The vector data if found, or None if not found
        """
        pass

    @abstractmethod
    async def get_by_ids(self, ids: list[str]) -> list[dict[str, Any]]:
        """Get multiple vector data entries by their IDs

        Args:
            ids: List of unique identifiers

        Returns:
            List of vector data objects that were found
        """
        pass

    @abstractmethod
    async def delete(self, ids: list[str]):
        """Delete vectors with the specified IDs

        Importance notes for in-memory storage:
        1. Changes will be persisted to disk during the next index_done_callback
        2. Only one process should update the storage at a time before
           index_done_callback; a KG-storage-log should be used to avoid
           data corruption

        Args:
            ids: List of vector IDs to be deleted
        """

@dataclass
class BaseKVStorage(StorageNameSpace, ABC):
    """Abstract interface for key-value storage backends."""

    embedding_func: EmbeddingFunc  # function used to embed content into vectors

    @abstractmethod
    async def get_by_id(self, id: str) -> dict[str, Any] | None:
        """Get a value by its ID, or None if it does not exist."""

    @abstractmethod
    async def get_by_ids(self, ids: list[str]) -> list[dict[str, Any]]:
        """Get values for a list of IDs."""

    @abstractmethod
    async def filter_keys(self, keys: set[str]) -> set[str]:
        """Return the subset of *keys* that do NOT exist in the storage."""

    @abstractmethod
    async def upsert(self, data: dict[str, dict[str, Any]]) -> None:
        """Insert or update data.

        Importance notes for in-memory storage:
        1. Changes will be persisted to disk during the next index_done_callback
        2. Update flags to notify other processes that data persistence is needed
        """

    @abstractmethod
    async def delete(self, ids: list[str]) -> None:
        """Delete specific records from storage by their IDs

        Importance notes for in-memory storage:
        1. Changes will be persisted to disk during the next index_done_callback
        2. Update flags to notify other processes that data persistence is needed

        Args:
            ids (list[str]): List of document IDs to be deleted from storage

        Returns:
            None
        """

    async def drop_cache_by_modes(self, modes: list[str] | None = None) -> bool:
        """Delete specific records from storage by cache mode

        Importance notes for in-memory storage:
        1. Changes will be persisted to disk during the next index_done_callback
        2. Update flags to notify other processes that data persistence is needed

        Args:
            modes (list[str]): List of cache modes to be dropped from storage

        Returns:
             True: if the cache was successfully dropped
             False: if dropping failed or the cache mode is not supported
        """
        # Fix: the method is annotated -> bool but previously had no body and
        # implicitly returned None. Default to False ("not supported"), matching
        # DocStatusStorage's explicit behavior; backends override this method.
        return False


@dataclass
class BaseGraphStorage(StorageNameSpace, ABC):
    """Abstract interface for graph storage backends.

    Subclasses must implement the single-node/single-edge primitives. The
    *_batch helpers ship with sequential default implementations built on
    those primitives and may be overridden with genuine batch queries
    (e.g. UNWIND) where the backend supports them.
    """

    embedding_func: EmbeddingFunc  # function used to embed content into vectors

    @abstractmethod
    async def has_node(self, node_id: str) -> bool:
        """Return True when a node with the given ID exists, False otherwise."""

    @abstractmethod
    async def has_edge(self, source_node_id: str, target_node_id: str) -> bool:
        """Return True when an edge exists between the two nodes, False otherwise."""

    @abstractmethod
    async def node_degree(self, node_id: str) -> int:
        """Return the degree of a node, i.e. the number of edges connected to it."""

    @abstractmethod
    async def edge_degree(self, src_id: str, tgt_id: str) -> int:
        """Return the total degree of an edge: the sum of the degrees of its
        source and target nodes."""

    @abstractmethod
    async def get_node(self, node_id: str) -> dict[str, str] | None:
        """Return the property dictionary of a node, or None when the node
        does not exist."""

    @abstractmethod
    async def get_edge(
        self, source_node_id: str, target_node_id: str
    ) -> dict[str, str] | None:
        """Return the property dictionary of the edge between the two nodes,
        or None when no such edge exists."""

    @abstractmethod
    async def get_node_edges(self, source_node_id: str) -> list[tuple[str, str]] | None:
        """Return every edge connected to a node as (source_id, target_id)
        tuples, or None when the node does not exist."""

    async def get_nodes_batch(self, node_ids: list[str]) -> dict[str, dict]:
        """Fetch several nodes at once, keyed by node ID.

        Missing nodes are simply absent from the result. This default issues
        one get_node call per ID; override it for backends with real batch
        support (e.g. UNWIND) for better performance.
        """
        return {
            node_id: props
            for node_id in node_ids
            if (props := await self.get_node(node_id)) is not None
        }

    async def node_degrees_batch(self, node_ids: list[str]) -> dict[str, int]:
        """Fetch the degrees of several nodes at once, keyed by node ID.

        Sequential default implementation; override for backends with real
        batch support for better performance.
        """
        return {node_id: await self.node_degree(node_id) for node_id in node_ids}

    async def edge_degrees_batch(
        self, edge_pairs: list[tuple[str, str]]
    ) -> dict[tuple[str, str], int]:
        """Fetch the combined degrees of several edges, keyed by (src, tgt).

        Sequential default implementation; override for backends with real
        batch support for better performance.
        """
        return {
            (src, tgt): await self.edge_degree(src, tgt) for src, tgt in edge_pairs
        }

    async def get_edges_batch(
        self, pairs: list[dict[str, str]]
    ) -> dict[tuple[str, str], dict]:
        """Fetch several edges at once.

        Each input item is a {"src": ..., "tgt": ...} mapping; edges that do
        not exist are absent from the result. Sequential default
        implementation; override for backends with real batch support.
        """
        collected: dict[tuple[str, str], dict] = {}
        for pair in pairs:
            key = (pair["src"], pair["tgt"])
            props = await self.get_edge(key[0], key[1])
            if props is not None:
                collected[key] = props
        return collected

    async def get_nodes_edges_batch(
        self, node_ids: list[str]
    ) -> dict[str, list[tuple[str, str]]]:
        """Fetch the edge lists of several nodes at once, keyed by node ID.

        Nodes that do not exist map to an empty list. Sequential default
        implementation; override for backends with real batch support.
        """
        collected: dict[str, list[tuple[str, str]]] = {}
        for node_id in node_ids:
            edges = await self.get_node_edges(node_id)
            collected[node_id] = [] if edges is None else edges
        return collected

    @abstractmethod
    async def upsert_node(self, node_id: str, node_data: dict[str, str]) -> None:
        """Insert a new node or update an existing one in the graph.

        Importance notes for in-memory storage:
        1. Changes will be persisted to disk during the next index_done_callback
        2. Only one process should update the storage at a time before
           index_done_callback; a KG-storage-log should be used to avoid
           data corruption

        Args:
            node_id: ID of the node to insert or update
            node_data: Dictionary of node properties
        """

    @abstractmethod
    async def upsert_edge(
        self, source_node_id: str, target_node_id: str, edge_data: dict[str, str]
    ) -> None:
        """Insert a new edge or update an existing one in the graph.

        Importance notes for in-memory storage:
        1. Changes will be persisted to disk during the next index_done_callback
        2. Only one process should update the storage at a time before
           index_done_callback; a KG-storage-log should be used to avoid
           data corruption

        Args:
            source_node_id: ID of the source node
            target_node_id: ID of the target node
            edge_data: Dictionary of edge properties
        """

    @abstractmethod
    async def delete_node(self, node_id: str) -> None:
        """Delete a node from the graph.

        Importance notes for in-memory storage:
        1. Changes will be persisted to disk during the next index_done_callback
        2. Only one process should update the storage at a time before
           index_done_callback; a KG-storage-log should be used to avoid
           data corruption

        Args:
            node_id: ID of the node to delete
        """

    @abstractmethod
    async def remove_nodes(self, nodes: list[str]):
        """Delete multiple nodes from the graph.

        Importance notes:
        1. Changes will be persisted to disk during the next index_done_callback
        2. Only one process should update the storage at a time before
           index_done_callback; a KG-storage-log should be used to avoid
           data corruption

        Args:
            nodes: List of node IDs to be deleted
        """

    @abstractmethod
    async def remove_edges(self, edges: list[tuple[str, str]]):
        """Delete multiple edges from the graph.

        Importance notes:
        1. Changes will be persisted to disk during the next index_done_callback
        2. Only one process should update the storage at a time before
           index_done_callback; a KG-storage-log should be used to avoid
           data corruption

        Args:
            edges: List of edges to be deleted, each a (source, target) tuple
        """

    @abstractmethod
    async def get_all_labels(self) -> list[str]:
        """Return all node labels in the graph, sorted alphabetically."""

    @abstractmethod
    async def get_knowledge_graph(
        self, node_label: str, max_depth: int = 3, max_nodes: int = 1000
    ) -> KnowledgeGraph:
        """Retrieve a connected subgraph of nodes whose labels include *node_label*.

        Args:
            node_label: Label of the starting node; "*" means all nodes
            max_depth: Maximum depth of the subgraph (default 3)
            max_nodes: Maximum number of nodes to return (default 1000);
                traversal is BFS where possible

        Returns:
            KnowledgeGraph object containing nodes and edges, with an
            is_truncated flag indicating whether the graph was cut off by
            the max_nodes limit
        """


class DocStatus(str, Enum):
    """Document processing status (string-valued for easy serialization)."""

    PENDING = "pending"  # queued, processing has not started yet
    PROCESSING = "processing"  # currently being processed
    PROCESSED = "processed"  # processing finished successfully
    FAILED = "failed"  # processing failed; see DocProcessingStatus.error


@dataclass
class DocProcessingStatus:
    """Document processing status data structure"""

    content: str
    """Original content of the document"""
    content_summary: str
    """First 100 chars of document content, used for preview"""
    content_length: int
    """Total length of document"""
    file_path: str
    """File path of the document"""
    status: DocStatus
    """Current processing status"""
    created_at: str
    """ISO format timestamp when document was created"""
    updated_at: str
    """ISO format timestamp when document was last updated"""
    chunks_count: int | None = None
    """Number of chunks after splitting, used for processing"""
    error: str | None = None
    """Error message if failed"""
    metadata: dict[str, Any] = field(default_factory=dict)
    """Additional metadata"""


@dataclass
class DocStatusStorage(BaseKVStorage, ABC):
    """Base class for document status storage"""

    @abstractmethod
    async def get_status_counts(self) -> dict[str, int]:
        """Get counts of documents in each status"""

    @abstractmethod
    async def get_docs_by_status(
        self, status: DocStatus
    ) -> dict[str, DocProcessingStatus]:
        """Get all documents with a specific status"""

    async def drop_cache_by_modes(self, modes: list[str] | None = None) -> bool:
        """Dropping cache is not supported for document status storage; always returns False."""
        return False


class StoragesStatus(str, Enum):
    """Lifecycle status of the storage backends (string-valued)."""

    NOT_CREATED = "not_created"  # storages have not been constructed yet
    CREATED = "created"  # storages constructed but not initialized
    INITIALIZED = "initialized"  # initialize() completed
    FINALIZED = "finalized"  # finalize() completed
