#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Author: zyx
@Date: 2024/12/08 12:42
@FileName: rag_wrapper.py
@Description: RAG包装类
"""
import os
from openai import AsyncOpenAI, APIConnectionError, RateLimitError, Timeout
from dotenv import load_dotenv, find_dotenv
from tenacity import (
    retry,
    stop_after_attempt,
    wait_exponential,
    retry_if_exception_type,
    RetryError
)

class ChromaVectorRAGWrapper:
    """Retrieval wrapper over per-tag Chroma vector stores.

    Loads one vector DB containing all tags plus one knowledge DB per tag
    (tag names come from ``configs.TAGS_DICT``) and concatenates the
    retrieved documents' ``page_content`` into a single string.
    """

    def __init__(
        self,
        knowledge_path: str,
        tag_db_path: str = "./datas/tag_chroma/all_tags/",
    ) -> None:
        """Open the tag DB and one knowledge DB per tag under *knowledge_path*.

        tag_db_path: location of the all-tags Chroma store (defaults to the
        previously hard-coded path, so existing callers are unaffected).
        """
        # Imported lazily so importing this module stays cheap.
        from utils import get_vectordb
        from configs import TAGS_DICT

        self.tag_vectordb = get_vectordb(tag_db_path, "all_tags")
        # os.path.join tolerates a missing trailing slash in knowledge_path;
        # plain string concatenation silently produced a wrong path.
        self.knowledge_dbs = [
            get_vectordb(os.path.join(knowledge_path, know), know)
            for know in TAGS_DICT
        ]

    def query(self, query: str, tag_k: int = 5, know_k: int = 10) -> str:
        """Return the concatenated contents of documents similar to *query*.

        tag_k:  number of tag matches to consider.
        know_k: number of knowledge documents to retrieve.
        """
        from utils import get_similar_docs_by_query

        similar_docs = get_similar_docs_by_query(
            query=query,
            tag_vectordb=self.tag_vectordb,
            knowledge_dbs=self.knowledge_dbs,
            tag_k=tag_k,
            know_k=know_k,
        )
        # Each element is a (Document, score) pair; keep only the text.
        return "".join(doc[0].page_content for doc in similar_docs)

class LightRAGWrapper:
    """Convenience wrapper around ``lightrag.LightRAG``.

    Knowledge-graph-based RAG construction is extremely token-hungry, so this
    wrapper uses cheaper open-source models hosted on SiliconFlow
    (https://cloud.siliconflow.cn/). In testing, Qwen/Qwen2.5-32B-Instruct and
    Qwen/Qwen2.5-72B-Instruct both work well and are recommended.
    """

    def __init__(
        self, working_dir: str, rag_llm_model: str = "Qwen/Qwen2.5-32B-Instruct"
    ) -> None:
        """Create a LightRAG instance backed by the SiliconFlow completion func."""
        from lightrag import LightRAG

        # Loads SILICONFLOW_API_KEY / SILICONFLOW_BASE_URL from a .env file.
        load_dotenv(find_dotenv())
        self.working_dir = working_dir
        self.rag_llm_model = rag_llm_model
        self.rag = LightRAG(
            working_dir=self.working_dir, llm_model_func=self.siliconflow_complete
        )

    def add_text(self, text: str) -> None:
        """Insert *text* into the RAG index."""
        self.rag.insert(text)

    def query(self, query: str, mode: str = "naive") -> str:
        """Query the index; *mode* is a lightrag query mode (e.g. "naive")."""
        from lightrag import QueryParam

        return self.rag.query(query, param=QueryParam(mode=mode))

    @retry(
        stop=stop_after_attempt(100),
        wait=wait_exponential(multiplier=1, min=4, max=120),
        retry=retry_if_exception_type(
            (RateLimitError, APIConnectionError, Timeout, RetryError)
        ),
    )
    async def siliconflow_complete_if_cache(
        self,
        model,
        prompt,
        system_prompt=None,
        history_messages=None,
        **kwargs,
    ) -> str:
        """Chat-complete via SiliconFlow's OpenAI-compatible API with optional caching.

        A lightrag ``hashing_kv`` (BaseKVStorage) may be passed in *kwargs*;
        responses are looked up / stored there keyed by a hash of the request.
        Retries transparently on rate limits, connection errors and timeouts.
        """
        from lightrag.utils import compute_args_hash
        from lightrag.base import BaseKVStorage

        # Fix: mutable default argument ([]) replaced by a None sentinel so the
        # history list is never shared between calls.
        if history_messages is None:
            history_messages = []

        openai_async_client = AsyncOpenAI(
            api_key=os.environ["SILICONFLOW_API_KEY"],
            base_url=os.environ["SILICONFLOW_BASE_URL"],
        )
        hashing_kv: BaseKVStorage = kwargs.pop("hashing_kv", None)

        messages = []
        if system_prompt:
            messages.append({"role": "system", "content": system_prompt})
        messages.extend(history_messages)
        messages.append({"role": "user", "content": prompt})

        args_hash = None
        if hashing_kv is not None:
            args_hash = compute_args_hash(model, messages)
            cached = await hashing_kv.get_by_id(args_hash)
            if cached is not None:
                return cached["return"]

        response = await openai_async_client.chat.completions.create(
            model=model, messages=messages, **kwargs
        )
        content = response.choices[0].message.content
        # Some replies arrive with literal "\uXXXX" escape sequences; decode them.
        # NOTE(review): unicode_escape can mangle non-ASCII text that is already
        # decoded — confirm this workaround is still needed for the target models.
        if r"\u" in content:
            content = content.encode("utf-8").decode("unicode_escape")
        if hashing_kv is not None:
            # Fix: cache the post-processed *content* so a cache hit returns
            # exactly what a fresh call returns (previously the raw, un-unescaped
            # text was cached, making cached and fresh results diverge).
            await hashing_kv.upsert(
                {args_hash: {"return": content, "model": model}}
            )
        return content

    async def siliconflow_complete(
        self,
        prompt,
        system_prompt=None,
        history_messages=None,
        keyword_extraction=False,
        **kwargs,
    ) -> str:
        """LLM function handed to LightRAG (see ``__init__``).

        When *keyword_extraction* is true, requests a JSON-object response and
        extracts the JSON body from the model output.
        """
        from lightrag.utils import locate_json_string_body_from_string

        # Fix: the old code did kwargs.pop("keyword_extraction", None), which
        # always returned None (the kwarg is bound by the signature) and thus
        # silently discarded an explicit keyword_extraction=True. Keep the pop
        # as a fallback but default to the bound parameter.
        keyword_extraction = kwargs.pop("keyword_extraction", keyword_extraction)
        if keyword_extraction:
            kwargs["response_format"] = {"type": "json_object"}
        result = await self.siliconflow_complete_if_cache(
            self.rag_llm_model,
            prompt,
            system_prompt=system_prompt,
            history_messages=history_messages,
            **kwargs,
        )
        if keyword_extraction:
            return locate_json_string_body_from_string(result)
        return result
