import os
from dotenv import find_dotenv, load_dotenv
from loguru import logger

from openai import AsyncOpenAI, APIConnectionError, RateLimitError, Timeout
from lightrag import LightRAG, QueryParam
from lightrag.base import BaseKVStorage
from lightrag.utils import compute_args_hash, locate_json_string_body_from_string

from tenacity import (
    retry,
    stop_after_attempt,
    wait_exponential,
    retry_if_exception_type,
    RetryError
)

load_dotenv(find_dotenv())  # pull SILICONFLOW_API_KEY / SILICONFLOW_BASE_URL from the nearest .env

# WORKING_DIR = "../datas/zhuyuanzhang_lightrag"  # alternate corpus (novel index)
WORKING_DIR = "../datas/papers_lightrag/"
os.makedirs(WORKING_DIR, exist_ok=True)


async def siliconflow_complete(
    prompt, system_prompt=None, history_messages=None, keyword_extraction=False, **kwargs
) -> str:
    """LightRAG-compatible LLM completion function backed by SiliconFlow.

    Args:
        prompt: The user prompt to send to the model.
        system_prompt: Optional system message prepended to the conversation.
        history_messages: Optional list of prior chat messages
            ({"role": ..., "content": ...} dicts). Defaults to no history.
        keyword_extraction: When truthy, request a JSON-object response and
            extract the JSON body from the model's answer.
        **kwargs: Extra options forwarded to the chat-completions call
            (may also carry ``keyword_extraction`` / ``hashing_kv`` from LightRAG).

    Returns:
        The completion text, or the located JSON string when
        ``keyword_extraction`` is set.
    """
    # BUG FIX: the original did kwargs.pop("keyword_extraction", None), which
    # always returned None — the flag is a named parameter, so it can never be
    # inside **kwargs, and an explicit keyword_extraction=True was discarded.
    # Honour both the named parameter and a kwargs-delivered flag, and make
    # sure it is not forwarded to the OpenAI client.
    keyword_extraction = kwargs.pop("keyword_extraction", keyword_extraction)
    if keyword_extraction:
        # Ask the model for structured JSON output.
        kwargs["response_format"] = {"type": "json_object"}
    result = await siliconflow_complete_if_cache(
        "Qwen/Qwen2.5-32B-Instruct",
        prompt,
        system_prompt=system_prompt,
        # Avoid the shared-mutable-default pitfall: normalize None to a fresh list.
        history_messages=history_messages if history_messages is not None else [],
        **kwargs,
    )
    if keyword_extraction:
        return locate_json_string_body_from_string(result)
    return result


@retry(
    stop=stop_after_attempt(3),
    wait=wait_exponential(multiplier=1, min=4, max=10),
    # NOTE(review): openai>=1.0 exposes APITimeoutError rather than Timeout —
    # confirm the installed openai version actually exports `Timeout`.
    retry=retry_if_exception_type((RateLimitError, APIConnectionError, Timeout, RetryError)),
)
async def siliconflow_complete_if_cache(
    model,
    prompt,
    system_prompt=None,
    history_messages=None,
    **kwargs,
) -> str:
    """Call the SiliconFlow chat-completions API with optional KV caching.

    Retries up to 3 times with exponential backoff on transient API errors.

    Args:
        model: Model identifier to query.
        prompt: The user prompt appended last to the message list.
        system_prompt: Optional system message placed first.
        history_messages: Optional prior chat messages inserted between the
            system message and the prompt. Defaults to no history.
        **kwargs: Forwarded to ``chat.completions.create``; ``hashing_kv``
            (a BaseKVStorage) is popped out and used as a response cache.

    Returns:
        The completion text (unicode-unescaped when it contains ``\\u`` sequences).
    """
    # Fix mutable-default pitfall: never use [] as a default argument.
    if history_messages is None:
        history_messages = []
    openai_async_client = AsyncOpenAI(
        api_key=os.environ["SILICONFLOW_API_KEY"],
        base_url=os.environ["SILICONFLOW_BASE_URL"],
    )
    hashing_kv: BaseKVStorage = kwargs.pop("hashing_kv", None)
    messages = []
    if system_prompt:
        messages.append({"role": "system", "content": system_prompt})
    messages.extend(history_messages)
    messages.append({"role": "user", "content": prompt})
    if hashing_kv is not None:
        # Cache key covers the model and the full message list.
        args_hash = compute_args_hash(model, messages)
        if_cache_return = await hashing_kv.get_by_id(args_hash)
        if if_cache_return is not None:
            return if_cache_return["return"]
    response = await openai_async_client.chat.completions.create(
        model=model, messages=messages, **kwargs
    )
    content = response.choices[0].message.content
    if r"\u" in content:
        # NOTE(review): this round-trip mangles non-ASCII text that is NOT
        # escape sequences (utf-8 bytes reinterpreted as latin-1); it only
        # works for pure-ASCII strings containing literal \uXXXX escapes.
        content = content.encode("utf-8").decode("unicode_escape")
    if hashing_kv is not None:
        # BUG FIX: cache the post-processed `content` instead of the raw
        # message text, so cache hits return exactly what a fresh call returns.
        await hashing_kv.upsert(
            {args_hash: {"return": content, "model": model}}
        )
    return content

# Module-level RAG instance shared by test_insert() / test_query() below.
rag = LightRAG(working_dir=WORKING_DIR, llm_model_func=siliconflow_complete)

def test_insert():
    """Read the novel text file and ingest it into the shared RAG index."""
    source_path = "../datas/novel/zhuyuanzhang.txt"
    with open(source_path, encoding="utf-8") as source_file:
        text = source_file.read()
    rag.insert(text)


def test_query():
    """Run the same question through every LightRAG retrieval mode and log each answer."""
    query = "除了将反应预测任务视为自然语言处理的机器翻译任务，还可以使用什么方式对其进行处理?"

    # Exercise each retrieval strategy in turn; the log lines match the
    # original per-mode calls exactly ("<mode> search: <answer>").
    for mode in ("naive", "local", "global", "hybrid"):
        answer = rag.query(query, param=QueryParam(mode=mode))
        logger.info(f"{mode} search: {answer}")


if __name__ == "__main__":
    # test_insert()  # run once first to build the index before querying
    test_query()
