from config.config import ModelConfig
from llama_index.core.indices.struct_store import JSONQueryEngine
# NOTE(review): stale comment — no index is loaded in this script; it builds JSONQueryEngines directly below.
from llama_index.llms.openai_like import OpenAILike
from llama_index.core import Settings



# Settings.embed_model = HuggingFaceEmbedding(
#     model_name="BAAI/bge-small-en-v1.5"
# )

# Build an OpenAI-compatible chat client from the project's model settings.
_llm_kwargs = {
    "model": ModelConfig.NAME,
    "api_base": ModelConfig.API_BASE,
    "api_key": ModelConfig.TOKEN,
    "is_chat_model": True,
    "is_function_calling_model": True,
}
llm = OpenAILike(**_llm_kwargs)
# Register it as the global default LLM for all llama-index components.
Settings.llm = llm

with open("docs/tree_schema.json", "r") as f:
    json_schema = f.read()

with open("docs/tree.json", "r") as f:
    json_value = f.read()

print("Loading JSONQueryEngine...")
# Both engines operate on the same document, schema, and LLM.
_engine_kwargs = {
    "json_value": json_value,
    "json_schema": json_schema,
    "llm": llm,
}
# Engine that synthesizes a natural-language answer from the JSON result.
nl_query_engine = JSONQueryEngine(**_engine_kwargs)
# Engine that returns the raw JSONPath result, skipping response synthesis.
raw_query_engine = JSONQueryEngine(synthesize_response=False, **_engine_kwargs)

# Natural-language questions to run against the JSON document.
queries = [
    # Core networking technology ("how to configure link aggregation")
    "如何配置链路聚合",
]

print("Running queries...")

# Run every query through both engines so the synthesized answer can be
# compared against the raw JSONPath result. (The original hard-coded
# queries[0]; iterating generalizes to any number of queries while producing
# identical output for the current single-element list.)
for query in queries:
    nl_response = nl_query_engine.query(query)
    raw_response = raw_query_engine.query(query)

    print(f"Q: {query}")
    print(f"A: {nl_response}")   # natural-language (synthesized) answer
    print(f"A: {raw_response}")  # raw JSONPath result

