from dill import settings
from llama_index.core import Settings
from llama_index.core import VectorStoreIndex
from llama_index.core.llama_dataset import download_llama_dataset
from llama_index.core.llama_pack import download_llama_pack
from llama_index.llms.openllm import OpenLLM
from llama_index.embeddings.huggingface import HuggingFaceEmbedding

# Configure global LlamaIndex settings: a local HuggingFace embedding model
# and an OpenAI-compatible LLM endpoint.
Settings.embed_model = HuggingFaceEmbedding(
    # Chinese embedding model; swap to "BAAI/bge-base-en-v1.5" for English corpora.
    model_name="BAAI/bge-base-zh-v1.5"
)
Settings.llm = OpenLLM(
    model="deepseek-r1:7b",
    api_base="http://127.0.0.1:11434/v1",  # local Ollama OpenAI-compatible endpoint
    api_key="Empty",  # placeholder; a local server typically ignores the key
)

# Download the benchmark dataset into ./Uber10KDataset2021.
# rag_dataset: labelled QA examples; documents: the source documents to index.
rag_dataset, documents = download_llama_dataset(
    "Uber10KDataset2021", "./Uber10KDataset2021"
)

# Build a basic RAG system over only the first 10 documents
# to keep embedding time short for this example.
index = VectorStoreIndex.from_documents(documents=documents[:10])
query_engine = index.as_query_engine()

print(query_engine)
