"""Query-fusion RAG demo.

Builds one vector index per city document (Hangzhou, Shenzhen), combines
their retrievers with a ``QueryFusionRetriever`` (which generates extra
query variants and fuses the retrieved nodes), and answers a question
through a ``RetrieverQueryEngine``.
"""
from llama_index.core import Settings, SimpleDirectoryReader, VectorStoreIndex
from llama_index.core.query_engine import RetrieverQueryEngine
from llama_index.core.retrievers import QueryFusionRetriever

from embeddings import embed_model_local_bge_small
from llms import deepseek_llm

# Configure the global LLM and embedding model once, before any index is built.
Settings.llm = deepseek_llm()
Settings.embed_model = embed_model_local_bge_small()


def _build_index(path: str) -> VectorStoreIndex:
    """Load a single file and build an in-memory vector index over it."""
    documents = SimpleDirectoryReader(input_files=[path]).load_data()
    return VectorStoreIndex.from_documents(documents)


def main(question: str = "介绍一下杭州和深圳的历史") -> None:
    """Build the fused retriever pipeline and print the answer to *question*.

    Args:
        question: Natural-language query routed through the fusion retriever.
    """
    index_hangzhou = _build_index("./data/hangzhou.txt")
    index_shenzhen = _build_index("./data/shenzhen.txt")

    retriever = QueryFusionRetriever(
        [
            index_hangzhou.as_retriever(similarity_top_k=3),
            index_shenzhen.as_retriever(similarity_top_k=4),
        ],
        similarity_top_k=3,
        num_queries=6,  # set this to 1 to disable query generation
        use_async=True,
        verbose=True,
        # query_gen_prompt="...",  # we could override the query generation prompt here
    )

    query_engine = RetrieverQueryEngine.from_args(retriever)
    response = query_engine.query(question)
    print(response)


# Guard so importing this module does not trigger LLM/network side effects.
if __name__ == "__main__":
    main()