import os
import pickle
import asyncio
import xxhash

import logging
from loguru import logger
import numpy as np
from dataclasses import dataclass
from dotenv import load_dotenv, find_dotenv
from openai import AsyncOpenAI
from time import time

class NanoGraphragWrapper:
    """Wrapper around nano-graphrag that supports direct invocation.

    Configures a :class:`GraphRAG` instance to use the DeepSeek chat API as
    both its best and cheap model, and a FAISS-backed vector store for
    embeddings.
    """

    # Imported at class-body level so the nested FAISSStorage dataclass below
    # can subclass it. Kept after the docstring so the docstring is the first
    # statement in the class body (otherwise it is a discarded string literal).
    from nano_graphrag.base import BaseVectorStorage

    def __init__(self, working_dir: str) -> None:
        """Build the GraphRAG pipeline.

        Args:
            working_dir: Directory where nano-graphrag persists its graph,
                cache and vector-index files.
        """
        load_dotenv(find_dotenv())
        from nano_graphrag import GraphRAG

        logging.getLogger("nano-graphrag").setLevel(logging.INFO)
        self.working_dir = working_dir
        self.model = "deepseek-chat"
        self.rag = GraphRAG(
            working_dir=self.working_dir,
            best_model_func=self.deepseepk_model_if_cache,
            cheap_model_func=self.deepseepk_model_if_cache,
            vector_db_storage_cls=NanoGraphragWrapper.FAISSStorage,
        )

    @dataclass
    class FAISSStorage(BaseVectorStorage):
        """FAISS-backed vector storage with pickle-persisted metadata.

        The index maps 32-bit xxhash ids to embeddings; per-entry metadata is
        kept in a plain dict serialized alongside the index file.
        """

        def __post_init__(self):
            import faiss

            self._index_file_name = os.path.join(
                self.global_config["working_dir"], f"{self.namespace}_faiss.index"
            )
            self._metadata_file_name = os.path.join(
                self.global_config["working_dir"], f"{self.namespace}_metadata.pkl"
            )
            self._max_batch_size = self.global_config["embedding_batch_num"]
            # Reload a previously persisted index/metadata pair when both
            # files exist; otherwise start a fresh inner-product index keyed
            # by int64 ids.
            if os.path.exists(self._index_file_name) and os.path.exists(
                self._metadata_file_name
            ):
                self._index = faiss.read_index(self._index_file_name)
                with open(self._metadata_file_name, "rb") as f:
                    self._metadata = pickle.load(f)
            else:
                self._index = faiss.IndexIDMap(
                    faiss.IndexFlatIP(self.embedding_func.embedding_dim)
                )
                self._metadata = {}

        async def upsert(self, data: dict[str, dict]):
            """Embed the ``content`` of each entry and add it to the index.

            Args:
                data: Mapping of entry key -> dict containing at least a
                    ``"content"`` field; fields listed in ``self.meta_fields``
                    are retained as metadata.

            Returns:
                Number of entries inserted.
            """
            # Guard: np.concatenate over an empty list raises ValueError.
            if not data:
                logger.warning(f"Received empty data for upsert to {self.namespace}")
                return 0
            logger.info(f"Inserting {len(data)} vectors to {self.namespace}")

            contents = [v["content"] for v in data.values()]
            batches = [
                contents[i : i + self._max_batch_size]
                for i in range(0, len(contents), self._max_batch_size)
            ]
            # Embed all batches concurrently, then stitch into one matrix.
            embeddings_list = await asyncio.gather(
                *[self.embedding_func(batch) for batch in batches]
            )
            embeddings = np.concatenate(embeddings_list)

            ids = []
            for k, v in data.items():
                # 32-bit hash of the key serves as the FAISS integer id
                # (avoid shadowing the `id` builtin).
                int_id = xxhash.xxh32_intdigest(k.encode())
                metadata = {k1: v1 for k1, v1 in v.items() if k1 in self.meta_fields}
                metadata["id"] = k
                self._metadata[int_id] = metadata
                ids.append(int_id)

            ids = np.array(ids, dtype=np.int64)
            self._index.add_with_ids(embeddings, ids)

            return len(data)

        async def query(self, query, top_k=5):
            """Return up to ``top_k`` metadata dicts nearest to ``query``.

            Each result carries a ``"distance"`` key converted from the
            inner-product score (1 - score; assumes normalized embeddings —
            TODO confirm against the embedding function).
            """
            embedding = await self.embedding_func([query])
            distances, indices = self._index.search(embedding, top_k)

            results = []
            for distance, vec_id in zip(distances[0], indices[0]):
                # FAISS returns -1 for empty slots.
                if vec_id != -1 and vec_id in self._metadata:
                    metadata = self._metadata[vec_id]
                    results.append(
                        {**metadata, "distance": 1 - distance}
                    )  # Convert to cosine distance
            return results

        async def index_done_callback(self):
            """Persist the FAISS index and metadata dict to disk."""
            import faiss

            faiss.write_index(self._index, self._index_file_name)
            with open(self._metadata_file_name, "wb") as f:
                pickle.dump(self._metadata, f)

    def add_text(self, text: str):
        """Insert raw text into the graph index (synchronous)."""
        self.rag.insert(text)

    def query(self, query: str, mode: str = "global"):
        """Query the graph; ``mode`` is 'global' or 'local'."""
        from nano_graphrag import QueryParam
        return self.rag.query(query, param=QueryParam(mode=mode))

    async def deepseepk_model_if_cache(
        self, prompt, system_prompt=None, history_messages=None, **kwargs
    ) -> str:
        """Call the DeepSeek chat API with optional KV-cache lookup.

        NOTE(review): the 'deepseepk' typo is preserved because the name is
        part of the public interface wired into GraphRAG in __init__.

        Args:
            prompt: User prompt appended last to the message list.
            system_prompt: Optional system message prepended to the list.
            history_messages: Optional prior conversation turns (defaults to
                none; avoid a mutable [] default shared across calls).
            **kwargs: Forwarded to the completion call; ``hashing_kv`` is
                popped and used as the response cache when provided.

        Returns:
            The assistant message content.
        """
        from nano_graphrag.base import BaseKVStorage
        from nano_graphrag._utils import compute_args_hash
        openai_async_client = AsyncOpenAI(
            api_key=os.environ["DEEPSEEK_API_KEY"],
            base_url=os.environ["DEEPSEEK_BASE_URL"],
        )
        messages = []
        if system_prompt:
            messages.append({"role": "system", "content": system_prompt})
        messages.extend(history_messages or [])
        messages.append({"role": "user", "content": prompt})

        # Return the cached response when one exists for this exact request.
        hashing_kv: BaseKVStorage = kwargs.pop("hashing_kv", None)
        if hashing_kv is not None:
            args_hash = compute_args_hash(self.model, messages)
            if_cache_return = await hashing_kv.get_by_id(args_hash)
            if if_cache_return is not None:
                return if_cache_return["return"]

        response = await openai_async_client.chat.completions.create(
            model=self.model, messages=messages, **kwargs
        )
        content = response.choices[0].message.content

        # Persist the fresh response so identical requests hit the cache.
        if hashing_kv is not None:
            await hashing_kv.upsert(
                {args_hash: {"return": content, "model": self.model}}
            )
        return content


def test_nano_add_text():
    """Build a graph index for the sample novel under the test working dir."""
    wrapper = NanoGraphragWrapper(working_dir="./datas/zhuyuanzhang_test")
    with open("./datas/novel/zhuyuanzhang.txt") as source:
        novel_text = source.read()
    wrapper.add_text(novel_text)


def test_nano_query_text():
    """Run the same question in global and local modes and log the timings."""
    wrapper = NanoGraphragWrapper(working_dir="./datas/zhuyuanzhang_test")

    t0 = time()
    logger.info(wrapper.query(query="朱元璋的父亲是谁？", mode="global"))
    logger.info(f"global 查询用时: {time()-t0:.4f} s")

    t0 = time()
    logger.info(wrapper.query(query="朱元璋的父亲是谁？", mode="local"))
    logger.info(f"local 查询用时: {time()-t0:.4f} s")


# Script entry point: build the index first, then exercise both query modes.
if __name__ == "__main__":
    test_nano_add_text()
    test_nano_query_text()