import os

from llama_index.embeddings.huggingface import HuggingFaceEmbedding

from config import HF_ENDPOINT

print(HF_ENDPOINT)
from llama_index.core import ServiceContext, VectorStoreIndex, Settings
from llama_index.core.embeddings import resolve_embed_model
from llama_index.core.node_parser import SimpleNodeParser
from llama_index.core.query_engine import RetrieverQueryEngine
from llama_index.llms.ollama import Ollama
from llama_index.readers.web import TrafilaturaWebReader

from utils import display_source_node

# Load the source documents: two Chinese Baidu Baike articles (ChatGPT, dinosaurs)
# fetched and cleaned with Trafilatura.
docs = TrafilaturaWebReader().load_data(["https://baike.baidu.com/item/ChatGPT/62446358","https://baike.baidu.com/item/恐龙/139019"])
# Create the document splitter (fixed-size chunks of 1024).
node_parser = SimpleNodeParser.from_defaults(chunk_size=1024)
# Split the documents into nodes that will be embedded and indexed below.
base_nodes = node_parser.get_nodes_from_documents(docs)

# Create the BAAI embedding model. The corpus and queries in this script are
# Chinese, so use the Chinese model bge-small-zh-v1.5.
# BUG FIX: previously this resolved model was discarded and Settings.embed_model
# was set to the English bge-small-en-v1.5, degrading retrieval on Chinese text.
embed_model = resolve_embed_model("local:BAAI/bge-small-zh-v1.5")
# Configure the LLM: a remote Ollama server running qwen2.5 7B.
Settings.llm = Ollama(base_url="http://172.16.14.212:11434", model="qwen2.5:7b", request_timeout=120.0)
# Use the resolved Chinese embedding model for both indexing and querying.
Settings.embed_model = embed_model
Settings.node_parser = node_parser
# Build the vector index over the pre-split nodes.
base_index = VectorStoreIndex(base_nodes, show_progress=True)
# Create a retriever returning the top-2 most similar chunks per query.
base_retriever = base_index.as_retriever(similarity_top_k=2)

# Retrieve the chunks most relevant to the question and show their sources.
# Reference: https://zhuanlan.zhihu.com/p/675119639
for retrieved_node in base_retriever.retrieve("恐龙是冷血动物吗？"):
    display_source_node(retrieved_node, source_length=1500)

# Build a query engine on top of the retriever so the configured LLM
# (Ollama qwen2.5) can synthesize an answer from the retrieved chunks.
query_engine_chunk = RetrieverQueryEngine.from_args(base_retriever)
# Ask the question and print the LLM's answer.
answer = query_engine_chunk.query("恐龙是冷血动物吗？")
print(str(answer))