import os
from lightrag import LightRAG, QueryParam
from lightrag.llm import openai_complete_if_cache
from lightrag.utils import EmbeddingFunc
import numpy as np
from dotenv import load_dotenv
load_dotenv()
from ApiTools import apiBase,apiTools

# from whyhow import WhyHow
# client = WhyHow(api_key=<your WhyHow API key>, base_url="https://api.whyhow.ai")
# workspace = client.workspaces.create(name="Companies")
# chunk = client.chunks.create(
#     workspace_id=workspace.workspace_id,
#     chunks=[Chunk(
#         content="As an entrepreneur and visionary, Sam Altman serves as the CEO of OpenAI, leading advancements in artificial intelligence."
#     )]
# )
# # Feel free to extend with your own triples

# triples = [
#     Triple(
#         head=Node(
#             name="Sam Altman",
#             label="Person",
#             properties={"title": "CEO"}
#         ),
#         relation=Relation(
#             name="runs",
#         ),
#         tail=Node(
#             name="OpenAI",
#             label="Business",
#             properties={"market cap": "$157 Billion"}
#         ),
#         chunk_ids=[c.chunk_id for c in chunk]
#     )
# ]
# # NOTE: `graph` is only created below via create_graph_from_triples — if you
# # uncomment this example, run the graph creation first so graph.graph_id exists.
# add_triples = client.graphs.add_triples(
#     graph_id=graph.graph_id,
#     triples = [
#         Triple(
#             head=Node(
#                 name="Matt Garman",
#                 label="Person",
#                 properties={"title": "CEO"}
#             ),
#             relation=Relation(
#                 name="runs",
#             ),
#             tail=Node(
#                 name="Amazon Web Services",
#                 label="Business",
#                 properties={"operating income": "$10.4 Billion"}
#             )
#         )
#     ]
# )
# graph = client.graphs.create_graph_from_triples(
#     name="Company Graph",
#     workspace_id=workspace.workspace_id,
#     triples=triples
# )
# query = client.graphs.query_unstructured(
#     graph_id=graph.graph_id,
#     query="Who runs OpenAI?"
# )

# Project-provided LLM handle and vector store (loaded here; not referenced
# directly in the rest of this script — presumably kept for interactive use).
llm = apiTools.llm
vectdb = apiTools.load_vec()

#########
# nest_asyncio patches the current event loop so LightRAG's internally-async
# rag.insert()/rag.query() calls work inside Jupyter or other nested loops.
import nest_asyncio
nest_asyncio.apply()
#########

# Directory where LightRAG persists its graph/vector artifacts.
WORKING_DIR = "./light_graph"
# makedirs(..., exist_ok=True) avoids the check-then-create race of
# os.path.exists() + os.mkdir() and is a no-op when the directory exists.
os.makedirs(WORKING_DIR, exist_ok=True)

async def llm_model_func(
    prompt, system_prompt=None, history_messages=None, **kwargs
) -> str:
    """LightRAG LLM adapter: delegate completion to the project's apiTools.

    Args:
        prompt: User prompt to complete.
        system_prompt: Optional system instruction forwarded to apiTools.
        history_messages: Prior chat turns; accepted to satisfy LightRAG's
            calling convention but not forwarded (apiTools.complete takes
            only system_prompt and prompt).
        **kwargs: Extra LightRAG options; ignored for the same reason.

    Returns:
        The completion text produced by apiTools.complete.
    """
    # Fix: the original used `history_messages=[]`, a mutable default shared
    # across calls; None is the safe sentinel (the value is unused anyway).
    return apiTools.complete(system_prompt, prompt)

async def embedding_func(texts: list[str]) -> np.ndarray:
    """Embed *texts* via the project's apiBase vector-embedding backend."""
    vectors = apiBase.vector_embed(texts)
    return vectors
    
# Assemble the RAG pipeline: custom async LLM adapter plus a 512-dim
# embedding backend, persisting artifacts under WORKING_DIR.
rag = LightRAG(
    working_dir=WORKING_DIR,
    llm_model_func=llm_model_func,
    embedding_func=EmbeddingFunc(
        embedding_dim=512,
        max_token_size=8192,
        func=embedding_func,
    ),
)

# Run the same question through each retrieval strategy in turn:
# naive, local, global, then hybrid search.
QUESTION = "What are the top themes in this story?"
for mode in ("naive", "local", "global", "hybrid"):
    print(rag.query(QUESTION, param=QueryParam(mode=mode)))