import logging
import os

from llama_index.core import SimpleDirectoryReader, VectorStoreIndex, Settings
from llama_index.core.postprocessor import SimilarityPostprocessor
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.llms.dashscope import DashScope

# DEBUG-level logging: very verbose, useful while developing the pipeline.
logging.basicConfig(level=logging.DEBUG)

print('初始化模型对象')

# LLM backend: Tongyi Qianwen served through the DashScope API.
llm_dash = DashScope(
    model="qwen-turbo",  # could be switched to the larger "qwen-max"
    api_key=os.getenv("DASHSCOPE_API_KEY"),  # issued via the Aliyun console
    temperature=0.3,
)

# Embedding backend: Chinese-optimized BGE model, run locally on CPU.
embed_model_zh = HuggingFaceEmbedding(
    model_name="BAAI/bge-small-zh-v1.5",
    device="cpu",
)

# Register both as the llama_index global defaults so the index/retriever
# below pick them up implicitly.
Settings.llm = llm_dash
Settings.embed_model = embed_model_zh

# Ingest every file under the data directory and embed it into a vector index.
data_dir = 'D:/pythonRoom/llm_llama/data2'
reader = SimpleDirectoryReader(input_dir=data_dir)
vector_index = VectorStoreIndex.from_documents(reader.load_data())
# Persist to disk so a later run could reload instead of re-embedding.
vector_index.storage_context.persist(persist_dir="./storage")

# Run a similarity search over the index for the query term.
result = vector_index.as_retriever().retrieve("全职儿女")

print('检索结果:')
for node in result:
    print(f"==>相似度: {node.score}=>,{node.text} [元数据: {node.metadata}]")

# Keep only nodes whose similarity score clears the cutoff.
# Fix: SimilarityPostprocessor is configured with `similarity_cutoff` only;
# the nodes to filter are the argument of postprocess_nodes(), not a
# constructor parameter. The original passed `nodes=result` to the
# constructor, which is not a configuration field and was silently ignored.
pp = SimilarityPostprocessor(similarity_cutoff=0.50)
remaining_nodes = pp.postprocess_nodes(result)

print('后处理器-检索结果:')
for item in remaining_nodes:
    print(f"==>相似度: {item.score}=>,{item.text} [元数据: {item.metadata}]")
